/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <nfs/nfs_conf.h>
#if CONFIG_NFS_CLIENT

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

#if CONFIG_NFS4
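/*
 * Perform an NFSv4 ACCESS RPC for the given node as the compound
 * PUTFH, ACCESS, GETATTR.  The result (intersected with the rights the
 * server claims to support) is stored in one of the node's access
 * cache slots, keyed by uid, and passed back to the caller.
 */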
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
		} else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		}
		if (dvp != NULLVP) {
			vnode_put(dvp);
		}
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
		access_result |= NFS_ACCESS_DELETE;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

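/*
 * Fetch attributes for the given file handle with a PUTFH+GETATTR
 * compound.  NGA_MONITOR and NGA_SOFT callers get recoverable/soft
 * RPC semantics; NGA_ACL additionally asks for the ACL when the
 * file system advertises ACL support.
 */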
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

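/*
 * Read a symlink's target with a PUTFH, GETATTR, READLINK compound.
 * The length returned by the server is clamped so the result always
 * fits in the caller's buffer (leaving room for a terminating NUL,
 * which the caller is expected to add).
 */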
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	size_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

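/*
 * Start an asynchronous NFSv4 READ (PUTFH, READ).  The open/lock
 * stateid to read under is chosen by nfs_get_stateid() based on the
 * calling thread and credential.  The request is completed by
 * nfs4_read_rpc_async_finish() below.
 */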
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

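/*
 * Complete an asynchronous READ: parse the eof flag and the returned
 * length, then copy the data into the caller's uio.  A reply with no
 * data and no eof flag is treated as eof, presumably to cope with
 * servers that neglect to set it.
 */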
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

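/*
 * Start an asynchronous NFSv4 WRITE (PUTFH, WRITE, GETATTR).  For
 * mounts marked MNT_ASYNC (when nfs_allow_async is set), the stable
 * level is downgraded to UNSTABLE, since the caller isn't going to
 * wait for the data to hit stable storage anyway.
 */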
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

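/*
 * Complete an asynchronous WRITE: parse the amount written, the
 * commitment level, and the write verifier.  A change in the server's
 * write verifier means previously unstable data may have been lost
 * and must be rewritten, so the new verifier is recorded on the mount
 * for nfs4_commit_rpc() to compare against.
 */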
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);

	/*
	 * NFSv4 WRITE RPCs include only a partial GETATTR request: just type,
	 * change, size, metadata time and modify time are requested.  In that
	 * case we update the requested attributes but do not update the
	 * attribute cache time stamp.
	 */
	np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE;

nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

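/*
 * Remove a directory entry with a PUTFH, REMOVE, GETATTR compound.
 * If the server is in its recovery grace period, the RPC is retried
 * after a short sleep until the grace period ends.
 */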
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}

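/*
 * Rename an entry: PUTFH(from), SAVEFH, PUTFH(to), RENAME, then
 * GETATTR of both directories (using RESTOREFH to get back to the
 * source directory).  If either directory's post-op attributes can't
 * be parsed, that directory's attribute cache is invalidated instead.
 */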
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}

/*
 * NFS V4 readdir RPC.
 */
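/*
 * Read entries into the given directory buffer with READDIR compounds
 * until the buffer is full or the directory hits EOF.  The client
 * synthesizes the "." and ".." entries at the front of the first
 * buffer (cookies 1 and 2), which is why cookies <= 2 are sent to the
 * server as 0.  In readdirplus mode each entry also carries the
 * entry's file handle and a cached nfs_vattr.
 */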
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint16_t namlen, reclen;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen32, skiplen, fhlen, xlen, attrlen;
	uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t *fh;
	struct nfs_vattr *nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp))) {
		return lockerror;
	}

	fh = zalloc(nfs_fhandle_zone);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
			dp = NFS_DIRENTRY_NEXT(dp);
		}
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh->fh_len = 0;
		fhlen = rdirplus ? fh->fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen + 1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
		}

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen + 1);
		if (dnp->n_parent) {
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		} else {
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		}
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
		}

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus) {
			microuptime(&now);
			if (lastcookie == 0) {
				dnp->n_rdirplusstamp_sof = now.tv_sec;
				dnp->n_rdirplusstamp_eof = 0;
			}
		}

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen32);
			if (namlen32 > UINT16_MAX) {
				error = EBADRPC;
				goto nfsmout;
			}
			namlen = (uint16_t)namlen32;
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen > (sizeof(dp->d_name) - 1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen) {
				nfsm_chain_adv(error, &nmrep,
				    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			}
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			}
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
				dp->d_fileno = nvattrp->nva_fileid;
			}
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
					bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
				} else {
					/* mark the file handle invalid */
					fh->fh_len = 0;
					fhlen = fh->fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
				nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0) {
				bzero(padstart, padlen);
			}
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			if (rdirplus) {
				dnp->n_rdirplusstamp_eof = now.tv_sec;
			}
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp) {
		nfs_buf_release(bp, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	FREE(nvattr, M_TEMP);
	return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
}

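/*
 * Start an asynchronous LOOKUP: PUTFH, GETATTR(dir), LOOKUP(P),
 * GETFH, GETATTR(file).  ".." is sent as LOOKUPP, and the named-attr
 * attribute is left out of the bitmap around ".zfs" directories,
 * which are known to mishandle requests for it.
 */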
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

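/*
 * Complete an asynchronous LOOKUP: load the directory attributes, then
 * pull out the file handle and attributes of the found entry.
 * NFSERR_MOVED/NFSERR_INVAL replies are dressed up as referral
 * triggers.  The first successful LOOKUP may also issue a SECINFO to
 * establish the mount's default security flavor.
 */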
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}

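/*
 * COMMIT unstable writes in the given range (PUTFH, COMMIT, GETATTR).
 * If the verifier the server returns doesn't match the one the data
 * was written under, NFSERR_STALEWRITEVERF tells the caller the data
 * must be rewritten.
 */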
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;
	count32 = count > UINT32_MAX ? 0 : (uint32_t)count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

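/*
 * Fetch pathconf-style file system limits (maxlink, maxname, no_trunc,
 * chown_restricted, case_insensitive, case_preserving) with a single
 * PUTFH+GETATTR compound, since NFSv4 has no separate PATHCONF
 * procedure.
 */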
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr *nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	NVATTR_INIT(nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(nvattr);
	FREE(nvattr, M_TEMP);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

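/*
 * NFSv4 getattr vnode op: fetch (possibly cached) attributes for the
 * vnode and translate the nfs_vattr results into the vnode_attr
 * fields the caller asked for, honoring the attribute bitmap the
 * server actually returned.
 */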
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  struct vnode_attr *a_vap;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr *nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK);
	error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags);
	if (error) {
		goto out;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva->nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva->nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva->nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva->nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, ACCESSPERMS);
		} else {
			VATTR_RETURN(vap, va_mode, nva->nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva->nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva->nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva->nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva->nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva->nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva->nva_acl);
		nva->nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(nva);
out:
	FREE(nva, M_TEMP);
	return error;
}

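/*
 * Set attributes with a PUTFH, SETATTR, GETATTR compound.  A size
 * change needs an open/lock stateid; other changes use a zeroed
 * (anonymous) stateid.  If the server chokes on a combined ACL+mode
 * update, the mode is dropped and the SETATTR retried with just the
 * ACL.
 */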
1624int
1625nfs4_setattr_rpc(
1626 nfsnode_t np,
1627 struct vnode_attr *vap,
1628 vfs_context_t ctx)
1629{
1630 struct nfsmount *nmp = NFSTONMP(np);
1631 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
1632 u_int64_t xid, nextxid;
1633 struct nfsm_chain nmreq, nmrep;
1634 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
1635 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1636 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
1637 nfs_stateid stateid;
1638 struct nfsreq_secinfo_args si;
1639
1640 if (nfs_mount_gone(nmp)) {
1641 return ENXIO;
1642 }
1643 nfsvers = nmp->nm_vers;
1644 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1645 return EINVAL;
1646 }
1647
1648 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
1649 /* we don't support setting unsupported flags (duh!) */
1650 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1651 return EINVAL; /* return EINVAL if other attributes also set */
1652 } else {
1653 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1654 }
1655 }
1656
1657 /* don't bother requesting some changes if they don't look like they are changing */
1658 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
1659 VATTR_CLEAR_ACTIVE(vap, va_uid);
1660 }
1661 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
1662 VATTR_CLEAR_ACTIVE(vap, va_gid);
1663 }
1664 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
1665 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1666 }
1667 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
1668 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1669 }
1670
1671tryagain:
1672 /* do nothing if no attributes will be sent */
1673 nfs_vattr_set_bitmap(nmp, bitmap, vap);
1674 if (!bitmap[0] && !bitmap[1]) {
1675 return 0;
1676 }
1677
1678 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1679 nfsm_chain_null(&nmreq);
1680 nfsm_chain_null(&nmrep);
1681
1682 /*
1683 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1684 * need to invalidate any cached ACL. And if we had an ACL cached,
1685 * we might as well also fetch the new value.
1686 */
1687 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1688 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1689 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
1690 if (NACLVALID(np)) {
1691 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
1692 }
1693 NACLINVALIDATE(np);
1694 }
1695
1696 // PUTFH, SETATTR, GETATTR
1697 numops = 3;
1698 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
1699 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
1700 numops--;
1701 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1702 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1703 numops--;
1704 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
1705 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1706 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1707 } else {
1708 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1709 }
1710 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1711 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1712 numops--;
1713 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1714 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1715 nfsm_chain_build_done(error, &nmreq);
1716 nfsm_assert(error, (numops == 0), EPROTO);
1717 nfsmout_if(error);
1718 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1719
1720 if ((lockerror = nfs_node_lock(np))) {
1721 error = lockerror;
1722 }
1723 nfsm_chain_skip_tag(error, &nmrep);
1724 nfsm_chain_get_32(error, &nmrep, numops);
1725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1726 nfsmout_if(error);
1727 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1728 nfsmout_if(error == EBADRPC);
1729 setattr_error = error;
1730 error = 0;
1731 bmlen = NFS_ATTR_BITMAP_LEN;
1732 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1733 if (!error) {
1734 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
1735 microuptime(&np->n_lastio);
1736 }
1737 nfs_vattr_set_supported(setbitmap, vap);
1738 error = setattr_error;
1739 }
1740 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1741 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1742 if (error) {
1743 NATTRINVALIDATE(np);
1744 }
1745 /*
1746 * We just changed the attributes and we want to make sure that we
1747 * see the latest attributes. Get the next XID. If it's not the
1748 * next XID after the SETATTR XID, then it's possible that another
1749 * RPC was in flight at the same time and it might put stale attributes
1750 * in the cache. In that case, we invalidate the attributes and set
1751 * the attribute cache XID to guarantee that newer attributes will
1752 * get loaded next.
1753 */
1754 nextxid = 0;
1755 nfs_get_xid(&nextxid);
1756 if (nextxid != (xid + 1)) {
1757 np->n_xid = nextxid;
1758 NATTRINVALIDATE(np);
1759 }
1760nfsmout:
1761 if (!lockerror) {
1762 nfs_node_unlock(np);
1763 }
1764 nfsm_chain_cleanup(&nmreq);
1765 nfsm_chain_cleanup(&nmrep);
1766 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1767 /*
1768 * Some servers may not like ACL/mode combos that get sent.
1769 * If it looks like that's what the server choked on, try setting
1770 * just the ACL and not the mode (unless it looks like everything
1771 * but mode was already successfully set).
1772 */
1773 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1774 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
1775 VATTR_CLEAR_ACTIVE(vap, va_mode);
1776 error = 0;
1777 goto tryagain;
1778 }
1779 }
1780 return error;
1781}
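/*
 * Illustrative sketch (editorial addition, not part of the original source
 * and not compiled): one way a caller might drive nfs4_setattr_rpc() to
 * truncate a file.  Real callers arrive via the VNOP setattr path; the
 * helper name here is hypothetical.
 */
#if 0
static int
example_truncate(nfsnode_t np, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_data_size, 0);  /* size change => stateid is fetched above */
	return nfs4_setattr_rpc(np, &va, ctx);
}
#endif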
1782#endif /* CONFIG_NFS4 */
1783
1784/*
1785 * Wait for any pending recovery to complete.
1786 */
1787int
1788nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1789{
1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1791 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1792
1793 lck_mtx_lock(&nmp->nm_lock);
1794 while (nmp->nm_state & NFSSTA_RECOVER) {
1795 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1796 break;
1797 }
1798 nfs_mount_sock_thread_wake(nmp);
1799 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1800 slpflag = 0;
1801 }
1802 lck_mtx_unlock(&nmp->nm_lock);
1803
1804 return error;
1805}
1806
1807/*
1808 * We're about to use/manipulate the NFS mount's open/lock state.
1809 * Wait for any pending state recovery to complete, then
1810 * mark the state as being in use (which will hold off
1811 * the recovery thread until we're done).
1812 */
1813int
1814nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1815{
1816 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1817 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1818
1819 if (nfs_mount_gone(nmp)) {
1820 return ENXIO;
1821 }
1822 lck_mtx_lock(&nmp->nm_lock);
1823 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
1824 lck_mtx_unlock(&nmp->nm_lock);
1825 return ENXIO;
1826 }
1827 while (nmp->nm_state & NFSSTA_RECOVER) {
1828 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1829 break;
1830 }
1831 nfs_mount_sock_thread_wake(nmp);
1832 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1833 slpflag = 0;
1834 }
1835 if (!error) {
1836 nmp->nm_stateinuse++;
1837 }
1838 lck_mtx_unlock(&nmp->nm_lock);
1839
1840 return error;
1841}
1842
1843/*
1844 * We're done using/manipulating the NFS mount's open/lock
1845 * state. If the given error indicates that recovery should
1846 * be performed, we'll initiate recovery.
1847 */
1848int
1849nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1850{
1851 int restart = nfs_mount_state_error_should_restart(error);
1852
1853 if (nfs_mount_gone(nmp)) {
1854 return ENXIO;
1855 }
1856 lck_mtx_lock(&nmp->nm_lock);
1857 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1858 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1859 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1860 nfs_need_recover(nmp, error);
1861 }
1862 if (nmp->nm_stateinuse > 0) {
1863 nmp->nm_stateinuse--;
1864 } else {
1865 panic("NFS mount state in use count underrun");
1866 }
1867 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
1868 wakeup(&nmp->nm_stateinuse);
1869 }
1870 lck_mtx_unlock(&nmp->nm_lock);
1871 if (error == NFSERR_GRACE) {
1872 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1873 }
1874
1875 return restart;
1876}
1877
1878/*
1879 * Does the error mean we should restart/redo a state-related operation?
1880 */
1881int
1882nfs_mount_state_error_should_restart(int error)
1883{
1884 switch (error) {
1885 case NFSERR_STALE_STATEID:
1886 case NFSERR_STALE_CLIENTID:
1887 case NFSERR_ADMIN_REVOKED:
1888 case NFSERR_EXPIRED:
1889 case NFSERR_OLD_STATEID:
1890 case NFSERR_BAD_STATEID:
1891 case NFSERR_GRACE:
1892 return 1;
1893 }
1894 return 0;
1895}
1896
1897/*
1898 * In some cases we may want to limit how many times we restart a
1899 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1900 * Base the limit on the lease (as long as it's not too short).
1901 */
1902uint
1903nfs_mount_state_max_restarts(struct nfsmount *nmp)
1904{
1905 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1906}
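/*
 * Illustrative sketch (editorial addition, not compiled): the canonical
 * caller pattern for nfs_mount_state_in_use_start/_end() with the restart
 * cap applied -- the same shape nfs_vnop_mmap() below uses, minus the cap.
 * The helper name is hypothetical.
 */
#if 0
static int
example_state_dependent_op(struct nfsmount *nmp, thread_t thd)
{
	uint restarts = 0;
	int error;

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
		return error;
	}
	/* ... perform an operation that uses open/lock state ... */
	if (nfs_mount_state_in_use_end(nmp, error) &&
	    (++restarts <= nfs_mount_state_max_restarts(nmp))) {
		goto restart;   /* state was recovered; redo the operation */
	}
	return error;
}
#endif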
1907
1908/*
1909 * Does the error mean we probably lost a delegation?
1910 */
1911int
1912nfs_mount_state_error_delegation_lost(int error)
1913{
1914 switch (error) {
1915 case NFSERR_STALE_STATEID:
1916 case NFSERR_ADMIN_REVOKED:
1917 case NFSERR_EXPIRED:
1918 case NFSERR_OLD_STATEID:
1919 case NFSERR_BAD_STATEID:
1920 case NFSERR_GRACE: /* ugh! RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during the grace period */
1921 return 1;
1922 }
1923 return 0;
1924}
1925
1926
1927/*
1928 * Mark an NFS node's open state as busy.
1929 */
1930int
1931nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1932{
1933 struct nfsmount *nmp;
1934 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1935 int error = 0, slpflag;
1936
1937 nmp = NFSTONMP(np);
1938 if (nfs_mount_gone(nmp)) {
1939 return ENXIO;
1940 }
1941 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1942
1943 lck_mtx_lock(&np->n_openlock);
1944 while (np->n_openflags & N_OPENBUSY) {
1945 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1946 break;
1947 }
1948 np->n_openflags |= N_OPENWANT;
1949 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1950 slpflag = 0;
1951 }
1952 if (!error) {
1953 np->n_openflags |= N_OPENBUSY;
1954 }
1955 lck_mtx_unlock(&np->n_openlock);
1956
1957 return error;
1958}
1959
1960/*
1961 * Clear an NFS node's open state busy flag and wake up
1962 * anyone wanting it.
1963 */
1964void
1965nfs_open_state_clear_busy(nfsnode_t np)
1966{
1967 int wanted;
1968
1969 lck_mtx_lock(&np->n_openlock);
1970 if (!(np->n_openflags & N_OPENBUSY)) {
1971 panic("nfs_open_state_clear_busy");
1972 }
1973 wanted = (np->n_openflags & N_OPENWANT);
1974 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1975 lck_mtx_unlock(&np->n_openlock);
1976 if (wanted) {
1977 wakeup(&np->n_openflags);
1978 }
1979}
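/*
 * Illustrative sketch (editorial addition, not compiled): the busy flag
 * above is a simple set/clear bracket around any code that examines or
 * updates a node's open state.  The helper name is hypothetical.
 */
#if 0
static int
example_open_state_bracket(nfsnode_t np, thread_t thd)
{
	int error;

	if ((error = nfs_open_state_set_busy(np, thd))) {
		return error;
	}
	/* ... examine/update np->n_openflags and the node's open lists ... */
	nfs_open_state_clear_busy(np);
	return 0;
}
#endif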
1980
1981/*
1982 * Search a mount's open owner list for the owner for this credential.
1983 * If not found and "alloc" is set, then allocate a new one.
1984 */
1985struct nfs_open_owner *
1986nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1987{
1988 uid_t uid = kauth_cred_getuid(cred);
1989 struct nfs_open_owner *noop, *newnoop = NULL;
1990
1991tryagain:
1992 lck_mtx_lock(&nmp->nm_lock);
1993 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1994 if (kauth_cred_getuid(noop->noo_cred) == uid) {
1995 break;
1996 }
1997 }
1998
1999 if (!noop && !newnoop && alloc) {
2000 lck_mtx_unlock(&nmp->nm_lock);
2001 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
2002 if (!newnoop) {
2003 return NULL;
2004 }
2005 bzero(newnoop, sizeof(*newnoop));
2006 lck_mtx_init(&newnoop->noo_lock, &nfs_open_grp, LCK_ATTR_NULL);
2007 newnoop->noo_mount = nmp;
2008 kauth_cred_ref(cred);
2009 newnoop->noo_cred = cred;
2010 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
2011 TAILQ_INIT(&newnoop->noo_opens);
2012 goto tryagain;
2013 }
2014 if (!noop && newnoop) {
2015 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
2016 os_ref_init(&newnoop->noo_refcnt, NULL);
2017 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
2018 noop = newnoop;
2019 }
2020 lck_mtx_unlock(&nmp->nm_lock);
2021
2022 if (newnoop && (noop != newnoop)) {
2023 nfs_open_owner_destroy(newnoop);
2024 }
2025
2026 if (noop) {
2027 nfs_open_owner_ref(noop);
2028 }
2029
2030 return noop;
2031}
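/*
 * Illustrative sketch (editorial addition, not compiled):
 * nfs_open_owner_find() returns a referenced owner, so every successful
 * lookup must be balanced with nfs_open_owner_rele() (defined below).
 * The helper name is hypothetical.
 */
#if 0
static int
example_open_owner_lookup(struct nfsmount *nmp, vfs_context_t ctx)
{
	struct nfs_open_owner *noop;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1 /* alloc */);
	if (!noop) {
		return ENOMEM;
	}
	/* ... look up or create open files under this owner ... */
	nfs_open_owner_rele(noop);
	return 0;
}
#endif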
2032
2033/*
2034 * destroy an open owner that's no longer needed
2035 */
2036void
2037nfs_open_owner_destroy(struct nfs_open_owner *noop)
2038{
2039 if (noop->noo_cred) {
2040 kauth_cred_unref(&noop->noo_cred);
2041 }
2042 lck_mtx_destroy(&noop->noo_lock, &nfs_open_grp);
2043 FREE(noop, M_TEMP);
2044}
2045
2046/*
2047 * acquire a reference count on an open owner
2048 */
2049void
2050nfs_open_owner_ref(struct nfs_open_owner *noop)
2051{
2052 lck_mtx_lock(&noop->noo_lock);
2053 os_ref_retain_locked(&noop->noo_refcnt);
2054 lck_mtx_unlock(&noop->noo_lock);
2055}
2056
2057/*
2058 * drop a reference count on an open owner and destroy it if
2059 * it is no longer referenced and no longer on the mount's list.
2060 */
2061void
2062nfs_open_owner_rele(struct nfs_open_owner *noop)
2063{
2064 os_ref_count_t newcount;
2065
2066 lck_mtx_lock(&noop->noo_lock);
2067 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
2068 panic("nfs_open_owner_rele: no refcnt");
2069 }
2070 newcount = os_ref_release_locked(&noop->noo_refcnt);
2071 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2072 panic("nfs_open_owner_rele: busy");
2073 }
2074 /* XXX we may potentially want to clean up idle/unused open owner structures */
2075 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
2076 lck_mtx_unlock(&noop->noo_lock);
2077 return;
2078 }
2079 /* owner is no longer referenced or linked to mount, so destroy it */
2080 lck_mtx_unlock(&noop->noo_lock);
2081 nfs_open_owner_destroy(noop);
2082}
2083
2084/*
2085 * Mark an open owner as busy because we are about to
2086 * start an operation that uses and updates open owner state.
2087 */
2088int
2089nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2090{
2091 struct nfsmount *nmp;
2092 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2093 int error = 0, slpflag;
2094
2095 nmp = noop->noo_mount;
2096 if (nfs_mount_gone(nmp)) {
2097 return ENXIO;
2098 }
2099 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2100
2101 lck_mtx_lock(&noop->noo_lock);
2102 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
2103 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2104 break;
2105 }
2106 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2107 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
2108 slpflag = 0;
2109 }
2110 if (!error) {
2111 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
2112 }
2113 lck_mtx_unlock(&noop->noo_lock);
2114
2115 return error;
2116}
2117
2118/*
2119 * Clear the busy flag on an open owner and wake up anyone waiting
2120 * to mark it busy.
2121 */
2122void
2123nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2124{
2125 int wanted;
2126
2127 lck_mtx_lock(&noop->noo_lock);
2128 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2129 panic("nfs_open_owner_clear_busy");
2130 }
2131 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2132 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2133 lck_mtx_unlock(&noop->noo_lock);
2134 if (wanted) {
2135 wakeup(noop);
2136 }
2137}
2138
2139/*
2140 * Given an open/lock owner and an error code, increment the
2141 * sequence ID if appropriate.
2142 */
2143void
2144nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2145{
2146 switch (error) {
2147 case NFSERR_STALE_CLIENTID:
2148 case NFSERR_STALE_STATEID:
2149 case NFSERR_OLD_STATEID:
2150 case NFSERR_BAD_STATEID:
2151 case NFSERR_BAD_SEQID:
2152 case NFSERR_BADXDR:
2153 case NFSERR_RESOURCE:
2154 case NFSERR_NOFILEHANDLE:
2155 /* do not increment the open seqid on these errors */
2156 return;
2157 }
2158 if (noop) {
2159 noop->noo_seqid++;
2160 }
2161 if (nlop) {
2162 nlop->nlo_seqid++;
2163 }
2164}
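/*
 * Illustrative sketch (editorial addition, not compiled): after a
 * seqid-modifying operation (OPEN, CLOSE, LOCK, ...) completes, its
 * result is fed to nfs_owner_seqid_increment() so the owner's seqid
 * advances exactly when the server will have advanced its copy.
 */
#if 0
static void
example_after_open_reply(struct nfs_open_owner *noop, int open_error)
{
	/* open_error is the hypothetical status of a just-completed OPEN op */
	nfs_owner_seqid_increment(noop, NULL, open_error);
}
#endif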
2165
2166/*
2167 * Search a node's open file list for any conflicts with this request.
2168 * Also find this open owner's open file structure.
2169 * If not found and "alloc" is set, then allocate one.
2170 */
2171int
2172nfs_open_file_find(
2173 nfsnode_t np,
2174 struct nfs_open_owner *noop,
2175 struct nfs_open_file **nofpp,
2176 uint32_t accessMode,
2177 uint32_t denyMode,
2178 int alloc)
2179{
2180 *nofpp = NULL;
2181 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2182}
2183
2184/*
2185 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2186 * if an existing one is not found. This is used in "create" scenarios to
2187 * officially add the provisional nofp to the node once the node is created.
2188 */
2189int
2190nfs_open_file_find_internal(
2191 nfsnode_t np,
2192 struct nfs_open_owner *noop,
2193 struct nfs_open_file **nofpp,
2194 uint32_t accessMode,
2195 uint32_t denyMode,
2196 int alloc)
2197{
2198 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2199
2200 if (!np) {
2201 goto alloc;
2202 }
2203tryagain:
2204 lck_mtx_lock(&np->n_openlock);
2205 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2206 if (nofp2->nof_owner == noop) {
2207 nofp = nofp2;
2208 if (!accessMode) {
2209 break;
2210 }
2211 }
2212 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2213 /* This request conflicts with an existing open on this client. */
2214 lck_mtx_unlock(&np->n_openlock);
2215 return EACCES;
2216 }
2217 }
2218
2219 /*
2220 * If this open owner doesn't have an open
2221 * file structure yet, we create one for it.
2222 */
2223 if (!nofp && !*nofpp && !newnofp && alloc) {
2224 lck_mtx_unlock(&np->n_openlock);
2225alloc:
2226 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2227 if (!newnofp) {
2228 return ENOMEM;
2229 }
2230 bzero(newnofp, sizeof(*newnofp));
2231 lck_mtx_init(&newnofp->nof_lock, &nfs_open_grp, LCK_ATTR_NULL);
2232 newnofp->nof_owner = noop;
2233 nfs_open_owner_ref(noop);
2234 newnofp->nof_np = np;
2235 lck_mtx_lock(&noop->noo_lock);
2236 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2237 lck_mtx_unlock(&noop->noo_lock);
2238 if (np) {
2239 goto tryagain;
2240 }
2241 }
2242 if (!nofp) {
2243 if (*nofpp) {
2244 (*nofpp)->nof_np = np;
2245 nofp = *nofpp;
2246 } else {
2247 nofp = newnofp;
2248 }
2249 if (nofp && np) {
2250 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2251 }
2252 }
2253 if (np) {
2254 lck_mtx_unlock(&np->n_openlock);
2255 }
2256
2257 if (alloc && newnofp && (nofp != newnofp)) {
2258 nfs_open_file_destroy(newnofp);
2259 }
2260
2261 *nofpp = nofp;
2262 return nofp ? 0 : ESRCH;
2263}
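/*
 * Illustrative sketch (editorial addition, not compiled): the
 * provisional-nofp flow described above, as used when creating a file.
 * A nodeless open file is allocated first; once the node exists, the
 * same structure is linked to it.  Here np stands in for the node the
 * create RPC would produce (hypothetical).
 */
#if 0
static int
example_create_flow(nfsnode_t np, struct nfs_open_owner *noop)
{
	struct nfs_open_file *newnofp = NULL;
	int error;

	/* allocate a provisional open file with no node yet (np == NULL) */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (error) {
		return error;
	}
	/* ... the create RPC produces the new node np here ... */
	/* attach the provisional nofp to the newly created node */
	return nfs_open_file_find_internal(np, noop, &newnofp, 0, 0, 0);
}
#endif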
2264
2265/*
2266 * Destroy an open file structure.
2267 */
2268void
2269nfs_open_file_destroy(struct nfs_open_file *nofp)
2270{
2271 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2272 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2273 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2274 nfs_open_owner_rele(nofp->nof_owner);
2275 lck_mtx_destroy(&nofp->nof_lock, &nfs_open_grp);
2276 FREE(nofp, M_TEMP);
2277}
2278
2279/*
2280 * Mark an open file as busy because we are about to
2281 * start an operation that uses and updates open file state.
2282 */
2283int
2284nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2285{
2286 struct nfsmount *nmp;
2287 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2288 int error = 0, slpflag;
2289
2290 nmp = nofp->nof_owner->noo_mount;
2291 if (nfs_mount_gone(nmp)) {
2292 return ENXIO;
2293 }
2294 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2295
2296 lck_mtx_lock(&nofp->nof_lock);
2297 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2298 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2299 break;
2300 }
2301 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2302 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2303 slpflag = 0;
2304 }
2305 if (!error) {
2306 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2307 }
2308 lck_mtx_unlock(&nofp->nof_lock);
2309
2310 return error;
2311}
2312
2313/*
2314 * Clear the busy flag on an open file and wake up anyone waiting
2315 * to mark it busy.
2316 */
2317void
2318nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2319{
2320 int wanted;
2321
2322 lck_mtx_lock(&nofp->nof_lock);
2323 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2324 panic("nfs_open_file_clear_busy");
2325 }
2326 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2327 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2328 lck_mtx_unlock(&nofp->nof_lock);
2329 if (wanted) {
2330 wakeup(nofp);
2331 }
2332}
2333
2334/*
2335 * Add the open state for the given access/deny modes to this open file.
2336 */
2337void
2338nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2339{
2340 lck_mtx_lock(&nofp->nof_lock);
2341 nofp->nof_access |= accessMode;
2342 nofp->nof_deny |= denyMode;
2343
2344 if (delegated) {
2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2347 nofp->nof_d_r++;
2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2349 nofp->nof_d_w++;
2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2351 nofp->nof_d_rw++;
2352 }
2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2355 nofp->nof_d_r_dw++;
2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2357 nofp->nof_d_w_dw++;
2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2359 nofp->nof_d_rw_dw++;
2360 }
2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2363 nofp->nof_d_r_drw++;
2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2365 nofp->nof_d_w_drw++;
2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2367 nofp->nof_d_rw_drw++;
2368 }
2369 }
2370 } else {
2371 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2372 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2373 nofp->nof_r++;
2374 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2375 nofp->nof_w++;
2376 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2377 nofp->nof_rw++;
2378 }
2379 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2380 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2381 nofp->nof_r_dw++;
2382 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2383 nofp->nof_w_dw++;
2384 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2385 nofp->nof_rw_dw++;
2386 }
2387 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2388 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2389 nofp->nof_r_drw++;
2390 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2391 nofp->nof_w_drw++;
2392 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2393 nofp->nof_rw_drw++;
2394 }
2395 }
2396 }
2397
2398 nofp->nof_opencnt++;
2399 lck_mtx_unlock(&nofp->nof_lock);
2400}
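/*
 * Illustrative sketch (editorial addition, not compiled): how access/deny
 * combinations map onto the counters above.  A counter name encodes
 * delegation ("d_"), access (r/w/rw), and deny (dw/drw; deny-none has no
 * suffix).  The helper name is hypothetical.
 */
#if 0
static void
example_add_opens(struct nfs_open_file *nofp)
{
	/* delegated open, read/write access, deny write => nof_d_rw_dw++ */
	nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_OPEN_SHARE_DENY_WRITE, 1);
	/* plain open, read access, deny none => nof_r++ */
	nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ,
	    NFS_OPEN_SHARE_DENY_NONE, 0);
}
#endif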
2401
2402/*
2403 * Find which particular open combo will be closed and report what
2404 * the new modes will be and whether the open was delegated.
2405 */
2406void
2407nfs_open_file_remove_open_find(
2408 struct nfs_open_file *nofp,
2409 uint32_t accessMode,
2410 uint32_t denyMode,
2411 uint8_t *newAccessMode,
2412 uint8_t *newDenyMode,
2413 int *delegated)
2414{
2415 /*
2416 * Calculate new modes: a mode bit gets removed when the sum of
2417 * all the corresponding counts is exactly one (i.e. this is the last open using that mode)
2418 */
2419 *newAccessMode = nofp->nof_access;
2420 *newDenyMode = nofp->nof_deny;
2421
2422 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2423 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2424 ((nofp->nof_r + nofp->nof_d_r +
2425 nofp->nof_rw + nofp->nof_d_rw +
2426 nofp->nof_r_dw + nofp->nof_d_r_dw +
2427 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2428 nofp->nof_r_drw + nofp->nof_d_r_drw +
2429 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2430 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2431 }
2432 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2433 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2434 ((nofp->nof_w + nofp->nof_d_w +
2435 nofp->nof_rw + nofp->nof_d_rw +
2436 nofp->nof_w_dw + nofp->nof_d_w_dw +
2437 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2438 nofp->nof_w_drw + nofp->nof_d_w_drw +
2439 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2440 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2441 }
2442 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2443 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2444 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2445 nofp->nof_w_drw + nofp->nof_d_w_drw +
2446 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2447 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2448 }
2449 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2450 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2451 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2452 nofp->nof_w_drw + nofp->nof_d_w_drw +
2453 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2454 nofp->nof_r_dw + nofp->nof_d_r_dw +
2455 nofp->nof_w_dw + nofp->nof_d_w_dw +
2456 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2457 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2458 }
2459
2460 /* Find the corresponding open access/deny mode counter. */
2461 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2462 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2463 *delegated = (nofp->nof_d_r != 0);
2464 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2465 *delegated = (nofp->nof_d_w != 0);
2466 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2467 *delegated = (nofp->nof_d_rw != 0);
2468 } else {
2469 *delegated = 0;
2470 }
2471 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2472 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2473 *delegated = (nofp->nof_d_r_dw != 0);
2474 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2475 *delegated = (nofp->nof_d_w_dw != 0);
2476 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2477 *delegated = (nofp->nof_d_rw_dw != 0);
2478 } else {
2479 *delegated = 0;
2480 }
2481 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2482 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2483 *delegated = (nofp->nof_d_r_drw != 0);
2484 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2485 *delegated = (nofp->nof_d_w_drw != 0);
2486 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2487 *delegated = (nofp->nof_d_rw_drw != 0);
2488 } else {
2489 *delegated = 0;
2490 }
2491 }
2492}
2493
2494/*
2495 * Remove the open state for the given access/deny modes from this open file.
2496 */
2497void
2498nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2499{
2500 uint8_t newAccessMode, newDenyMode;
2501 int delegated = 0;
2502
2503 lck_mtx_lock(&nofp->nof_lock);
2504 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2505
2506 /* Decrement the corresponding open access/deny mode counter. */
2507 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2508 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2509 if (delegated) {
2510 if (nofp->nof_d_r == 0) {
2511 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2512 } else {
2513 nofp->nof_d_r--;
2514 }
2515 } else {
2516 if (nofp->nof_r == 0) {
2517 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2518 } else {
2519 nofp->nof_r--;
2520 }
2521 }
2522 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2523 if (delegated) {
2524 if (nofp->nof_d_w == 0) {
2525 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2526 } else {
2527 nofp->nof_d_w--;
2528 }
2529 } else {
2530 if (nofp->nof_w == 0) {
2531 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2532 } else {
2533 nofp->nof_w--;
2534 }
2535 }
2536 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2537 if (delegated) {
2538 if (nofp->nof_d_rw == 0) {
2539 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2540 } else {
2541 nofp->nof_d_rw--;
2542 }
2543 } else {
2544 if (nofp->nof_rw == 0) {
2545 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2546 } else {
2547 nofp->nof_rw--;
2548 }
2549 }
2550 }
2551 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2552 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2553 if (delegated) {
2554 if (nofp->nof_d_r_dw == 0) {
2555 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2556 } else {
2557 nofp->nof_d_r_dw--;
2558 }
2559 } else {
2560 if (nofp->nof_r_dw == 0) {
2561 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2562 } else {
2563 nofp->nof_r_dw--;
2564 }
2565 }
2566 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2567 if (delegated) {
2568 if (nofp->nof_d_w_dw == 0) {
2569 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2570 } else {
2571 nofp->nof_d_w_dw--;
2572 }
2573 } else {
2574 if (nofp->nof_w_dw == 0) {
2575 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2576 } else {
2577 nofp->nof_w_dw--;
2578 }
2579 }
2580 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2581 if (delegated) {
2582 if (nofp->nof_d_rw_dw == 0) {
2583 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2584 } else {
2585 nofp->nof_d_rw_dw--;
2586 }
2587 } else {
2588 if (nofp->nof_rw_dw == 0) {
2589 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2590 } else {
2591 nofp->nof_rw_dw--;
2592 }
2593 }
2594 }
2595 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2596 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2597 if (delegated) {
2598 if (nofp->nof_d_r_drw == 0) {
2599 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2600 } else {
2601 nofp->nof_d_r_drw--;
2602 }
2603 } else {
2604 if (nofp->nof_r_drw == 0) {
2605 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2606 } else {
2607 nofp->nof_r_drw--;
2608 }
2609 }
2610 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2611 if (delegated) {
2612 if (nofp->nof_d_w_drw == 0) {
2613 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2614 } else {
2615 nofp->nof_d_w_drw--;
2616 }
2617 } else {
2618 if (nofp->nof_w_drw == 0) {
2619 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2620 } else {
2621 nofp->nof_w_drw--;
2622 }
2623 }
2624 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2625 if (delegated) {
2626 if (nofp->nof_d_rw_drw == 0) {
2627 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2628 } else {
2629 nofp->nof_d_rw_drw--;
2630 }
2631 } else {
2632 if (nofp->nof_rw_drw == 0) {
2633 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2634 } else {
2635 nofp->nof_rw_drw--;
2636 }
2637 }
2638 }
2639 }
2640
2641 /* update the modes */
2642 nofp->nof_access = newAccessMode;
2643 nofp->nof_deny = newDenyMode;
2644 nofp->nof_opencnt--;
2645 lck_mtx_unlock(&nofp->nof_lock);
2646}
2647
2648#if CONFIG_NFS4
2649/*
2650 * Get the current (delegation, lock, open, default) stateid for this node.
2651 * If node has a delegation, use that stateid.
2652 * If pid has a lock, use the lockowner's stateid.
2653 * Or use the open file's stateid.
2654 * If no open file, use a default stateid of all ones.
2655 */
2656void
2657nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2658{
2659 struct nfsmount *nmp = NFSTONMP(np);
2660 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2661 struct nfs_open_owner *noop = NULL;
2662 struct nfs_open_file *nofp = NULL;
2663 struct nfs_lock_owner *nlop = NULL;
2664 nfs_stateid *s = NULL;
2665
2666 if (np->n_openflags & N_DELEG_MASK) {
2667 s = &np->n_dstateid;
2668 } else {
2669 if (p) {
2670 nlop = nfs_lock_owner_find(np, p, 0);
2671 }
2672 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2673 /* we hold locks, use lock stateid */
2674 s = &nlop->nlo_stateid;
2675 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2676 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2677 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2678 nofp->nof_access) {
2679 /* we (should) have the file open, use open stateid */
2680 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2681 nfs4_reopen(nofp, thd);
2682 }
2683 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2684 s = &nofp->nof_stateid;
2685 }
2686 }
2687 }
2688
2689 if (s) {
2690 sid->seqid = s->seqid;
2691 sid->other[0] = s->other[0];
2692 sid->other[1] = s->other[1];
2693 sid->other[2] = s->other[2];
2694 } else {
2695 /* named attributes may not have a stateid for reads, so don't complain for them */
2696 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
2697 NP(np, "nfs_get_stateid: no stateid");
2698 }
2699 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2700 }
2701 if (nlop) {
2702 nfs_lock_owner_rele(nlop);
2703 }
2704 if (noop) {
2705 nfs_open_owner_rele(noop);
2706 }
2707}
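/*
 * Illustrative sketch (editorial addition, not compiled): stamping an I/O
 * request with the stateid chosen above (delegation, then lock, then open,
 * then the all-ones default) -- the same call nfs4_setattr_rpc() makes for
 * size changes.  The helper name is hypothetical.
 */
#if 0
static void
example_stateid_for_io(nfsnode_t np, thread_t thd, kauth_cred_t cred)
{
	nfs_stateid stateid;

	nfs_get_stateid(np, thd, cred, &stateid);
	/* ... add the stateid to the READ/WRITE request being built ... */
}
#endif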
2708
2709
2710/*
2711 * When we have a delegation, we may be able to perform the OPEN locally.
2712 * Authorize the OPEN by evaluating the delegation ACE and/or asking the server via ACCESS.
2713 */
2714int
2715nfs4_open_delegated(
2716 nfsnode_t np,
2717 struct nfs_open_file *nofp,
2718 uint32_t accessMode,
2719 uint32_t denyMode,
2720 vfs_context_t ctx)
2721{
2722 int error = 0, ismember, readtoo = 0, authorized = 0;
2723 uint32_t action;
2724 struct kauth_acl_eval eval;
2725 kauth_cred_t cred = vfs_context_ucred(ctx);
2726
2727 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2728 /*
2729 * Try to open it for read access too,
2730 * so the buffer cache can read data.
2731 */
2732 readtoo = 1;
2733 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2734 }
2735
2736tryagain:
2737 action = 0;
2738 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
2739 action |= KAUTH_VNODE_READ_DATA;
2740 }
2741 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
2742 action |= KAUTH_VNODE_WRITE_DATA;
2743 }
2744
2745 /* evaluate ACE (if we have one) */
2746 if (np->n_dace.ace_flags) {
2747 eval.ae_requested = action;
2748 eval.ae_acl = &np->n_dace;
2749 eval.ae_count = 1;
2750 eval.ae_options = 0;
2751 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
2752 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2753 }
2754 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2755 if (!error && ismember) {
2756 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2757 }
2758
2759 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2760 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2761 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2762 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2763
2764 error = kauth_acl_evaluate(cred, &eval);
2765
2766 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
2767 authorized = 1;
2768 }
2769 }
2770
2771 if (!authorized) {
2772 /* need to ask the server via ACCESS */
2773 struct vnop_access_args naa;
2774 naa.a_desc = &vnop_access_desc;
2775 naa.a_vp = NFSTOV(np);
2776 naa.a_action = action;
2777 naa.a_context = ctx;
2778 if (!(error = nfs_vnop_access(&naa))) {
2779 authorized = 1;
2780 }
2781 }
2782
2783 if (!authorized) {
2784 if (readtoo) {
2785 /* try again without the extra read access */
2786 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2787 readtoo = 0;
2788 goto tryagain;
2789 }
2790 return error ? error : EACCES;
2791 }
2792
2793 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2794
2795 return 0;
2796}
2797
2798
2799/*
2800 * Open a file with the given access/deny modes.
2801 *
2802 * If we have a delegation, we may be able to handle the open locally.
2803 * Otherwise, we will always send the open RPC even if this open's mode is
2804 * a subset of all the existing opens. This makes sure that we will always
2805 * be able to do a downgrade to any of the open modes.
2806 *
2807 * Note: local conflicts should have already been checked in nfs_open_file_find().
2808 */
2809int
2810nfs4_open(
2811 nfsnode_t np,
2812 struct nfs_open_file *nofp,
2813 uint32_t accessMode,
2814 uint32_t denyMode,
2815 vfs_context_t ctx)
2816{
2817 vnode_t vp = NFSTOV(np);
2818 vnode_t dvp = NULL;
2819 struct componentname cn;
2820 const char *vname = NULL;
2821 uint32_t namelen;
2822 char smallname[128];
2823 char *filename = NULL;
2824 int error = 0, readtoo = 0;
2825
2826 /*
2827 * We can handle the OPEN ourselves if we have a delegation,
2828 * unless it's a read delegation and the open is asking for
2829 * either write access or deny read. We also don't bother to
2830 * use the delegation if it's being returned.
2831 */
2832 if (np->n_openflags & N_DELEG_MASK) {
2833 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2834 return error;
2835 }
2836 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2837 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2838 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2839 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2840 nfs_open_state_clear_busy(np);
2841 return error;
2842 }
2843 nfs_open_state_clear_busy(np);
2844 }
2845
2846 /*
2847 * [sigh] We can't trust VFS to get the parent right for named
2848 * attribute nodes. (It likes to reparent the nodes after we've
2849 * created them.) Luckily we can probably get the right parent
2850 * from the n_parent we have stashed away.
2851 */
2852 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2853 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
2854 dvp = NULL;
2855 }
2856 if (!dvp) {
2857 dvp = vnode_getparent(vp);
2858 }
2859 vname = vnode_getname(vp);
2860 if (!dvp || !vname) {
2861 if (!error) {
2862 error = EIO;
2863 }
2864 goto out;
2865 }
2866 filename = &smallname[0];
2867 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2868 if (namelen >= sizeof(smallname)) {
2869 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
2870 if (!filename) {
2871 error = ENOMEM;
2872 goto out;
2873 }
2874 snprintf(filename, namelen + 1, "%s", vname);
2875 }
2876 bzero(&cn, sizeof(cn));
2877 cn.cn_nameptr = filename;
2878 cn.cn_namelen = namelen;
2879
2880 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2881 /*
2882 * Try to open it for read access too,
2883 * so the buffer cache can read data.
2884 */
2885 readtoo = 1;
2886 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2887 }
2888tryagain:
2889 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2890 if (error) {
2891 if (!nfs_mount_state_error_should_restart(error) &&
2892 (error != EINTR) && (error != ERESTART) && readtoo) {
2893 /* try again without the extra read access */
2894 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2895 readtoo = 0;
2896 goto tryagain;
2897 }
2898 goto out;
2899 }
2900 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2901out:
2902 if (filename && (filename != &smallname[0])) {
2903 FREE(filename, M_TEMP);
2904 }
2905 if (vname) {
2906 vnode_putname(vname);
2907 }
2908 if (dvp != NULLVP) {
2909 vnode_put(dvp);
2910 }
2911 return error;
2912}
2913#endif /* CONFIG_NFS4 */
2914
2915int
2916nfs_vnop_mmap(
2917 struct vnop_mmap_args /* {
2918 * struct vnodeop_desc *a_desc;
2919 * vnode_t a_vp;
2920 * int a_fflags;
2921 * vfs_context_t a_context;
2922 * } */*ap)
2923{
2924 vfs_context_t ctx = ap->a_context;
2925 vnode_t vp = ap->a_vp;
2926 nfsnode_t np = VTONFS(vp);
2927 int error = 0, delegated = 0;
2928 uint8_t accessMode, denyMode;
2929 struct nfsmount *nmp;
2930 struct nfs_open_owner *noop = NULL;
2931 struct nfs_open_file *nofp = NULL;
2932
2933 nmp = VTONMP(vp);
2934 if (nfs_mount_gone(nmp)) {
2935 return ENXIO;
2936 }
2937
2938 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2939 return EINVAL;
2940 }
2941 if (np->n_flag & NREVOKE) {
2942 return EIO;
2943 }
2944
2945 /*
2946 * fflags contains some combination of: PROT_READ, PROT_WRITE
2947 * Since it's not possible to mmap() without having the file open for reading,
2948 * read access is always implied (even if PROT_READ is not set).
2949 */
2950 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2951 if (ap->a_fflags & PROT_WRITE) {
2952 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2953 }
2954 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2955
2956 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2957 if (!noop) {
2958 return ENOMEM;
2959 }
2960
2961restart:
2962 error = nfs_mount_state_in_use_start(nmp, NULL);
2963 if (error) {
2964 nfs_open_owner_rele(noop);
2965 return error;
2966 }
2967 if (np->n_flag & NREVOKE) {
2968 error = EIO;
2969 nfs_mount_state_in_use_end(nmp, 0);
2970 nfs_open_owner_rele(noop);
2971 return error;
2972 }
2973
2974 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2975 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2976 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2977 error = EPERM;
2978 }
2979#if CONFIG_NFS4
2980 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2981 error = nfs4_reopen(nofp, NULL);
2982 nofp = NULL;
2983 if (!error) {
2984 nfs_mount_state_in_use_end(nmp, 0);
2985 goto restart;
2986 }
2987 }
2988#endif
2989 if (!error) {
2990 error = nfs_open_file_set_busy(nofp, NULL);
2991 }
2992 if (error) {
2993 nofp = NULL;
2994 goto out;
2995 }
2996
2997 /*
2998 * The open reference for mmap must mirror an existing open because
2999 * we may need to reclaim it after the file is closed.
3000 * So grab another open count matching the accessMode passed in.
3001 * If we already had an mmap open, prefer read/write without deny mode.
3002 * This means we may have to drop the current mmap open first.
3003 *
3004 * N.B. We should already have an open for the mmap: either mmap was
3005 * called on an open descriptor, or we created a read open while
3006 * reading the first page for execve. However, if we piggybacked on an
3007 * existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE open,
3008 * that open may have closed.
3009 */
3010
3011 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
3012 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
3013 /* We shouldn't get here. We've already opened the file for execve */
3014 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
3015 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3016 }
3017 /*
3018 * Mappings for execve are read-only. Get out with EPERM if the accessMode is not ACCESS_READ
3019 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3020 */
3021 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
3022 /* not asking for just read access -> fail */
3023 error = EPERM;
3024 goto out;
3025 }
3026 /* we don't have the file open, so open it for read access */
3027 if (nmp->nm_vers < NFS_VER4) {
3028 /* NFS v2/v3 opens are always allowed - so just add it. */
3029 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
3030 error = 0;
3031 }
3032#if CONFIG_NFS4
3033 else {
3034 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
3035 }
3036#endif
3037 if (!error) {
3038 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
3039 }
3040 if (error) {
3041 goto out;
3042 }
3043 }
3044
3045 /* determine deny mode for open */
3046 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3047 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3048 delegated = 1;
3049 if (nofp->nof_d_rw) {
3050 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3051 } else if (nofp->nof_d_rw_dw) {
3052 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3053 } else if (nofp->nof_d_rw_drw) {
3054 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3055 }
3056 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3057 delegated = 0;
3058 if (nofp->nof_rw) {
3059 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3060 } else if (nofp->nof_rw_dw) {
3061 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3062 } else if (nofp->nof_rw_drw) {
3063 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3064 }
3065 } else {
3066 error = EPERM;
3067 }
3068 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3069 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3070 delegated = 1;
3071 if (nofp->nof_d_r) {
3072 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3073 } else if (nofp->nof_d_r_dw) {
3074 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3075 } else if (nofp->nof_d_r_drw) {
3076 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3077 }
3078 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3079 delegated = 0;
3080 if (nofp->nof_r) {
3081 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3082 } else if (nofp->nof_r_dw) {
3083 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3084 } else if (nofp->nof_r_drw) {
3085 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3086 }
3087 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3088 /*
3089 * This clause and the one below co-opt a read/write open
3090 * for a read-only mapping. We probably got here because an
3091 * existing read/write open for an executable file already exists.
3092 */
3093 delegated = 1;
3094 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3095 if (nofp->nof_d_rw) {
3096 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3097 } else if (nofp->nof_d_rw_dw) {
3098 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3099 } else if (nofp->nof_d_rw_drw) {
3100 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3101 }
3102 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3103 delegated = 0;
3104 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3105 if (nofp->nof_rw) {
3106 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3107 } else if (nofp->nof_rw_dw) {
3108 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3109 } else if (nofp->nof_rw_drw) {
3110 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3111 }
3112 } else {
3113 error = EPERM;
3114 }
3115 }
3116 if (error) { /* mmap mode without proper open mode */
3117 goto out;
3118 }
3119
3120 /*
3121 * If the existing mmap access is greater than the new access OR the
3122 * existing access is the same and the existing deny mode is no greater,
3123 * then we'll stick with the existing mmap open mode.
3124 */
3125 if ((nofp->nof_mmap_access > accessMode) ||
3126 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
3127 goto out;
3128 }
3129
3130 /* update mmap open mode */
3131 if (nofp->nof_mmap_access) {
3132 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3133 if (error) {
3134 if (!nfs_mount_state_error_should_restart(error)) {
3135 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3136 }
3137 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3138 goto out;
3139 }
3140 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3141 }
3142
3143 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3144 nofp->nof_mmap_access = accessMode;
3145 nofp->nof_mmap_deny = denyMode;
3146
3147out:
3148 if (nofp) {
3149 nfs_open_file_clear_busy(nofp);
3150 }
3151 if (nfs_mount_state_in_use_end(nmp, error)) {
3152 nofp = NULL;
3153 goto restart;
3154 }
3155 if (noop) {
3156 nfs_open_owner_rele(noop);
3157 }
3158
3159 if (!error) {
3160 int ismapped = 0;
3161 nfs_node_lock_force(np);
3162 if ((np->n_flag & NISMAPPED) == 0) {
3163 np->n_flag |= NISMAPPED;
3164 ismapped = 1;
3165 }
3166 nfs_node_unlock(np);
3167 if (ismapped) {
3168 lck_mtx_lock(&nmp->nm_lock);
3169 nmp->nm_state &= ~NFSSTA_SQUISHY;
3170 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
3171 if (nmp->nm_curdeadtimeout <= 0) {
3172 nmp->nm_deadto_start = 0;
3173 }
3174 nmp->nm_mappers++;
3175 lck_mtx_unlock(&nmp->nm_lock);
3176 }
3177 }
3178
3179 return error;
3180}
3181
3182int
3183nfs_vnop_mmap_check(
3184 struct vnop_mmap_check_args /* {
3185 * struct vnodeop_desc *a_desc;
3186 * vnode_t a_vp;
3187 * int a_flags;
3188 * vfs_context_t a_context;
3189 * } */*ap)
3190{
3191 vfs_context_t ctx = ap->a_context;
3192 vnode_t vp = ap->a_vp;
3193 struct nfsmount *nmp = VTONMP(vp);
3194 struct vnop_access_args naa;
3195 int error = 0;
3196
3197 if (nfs_mount_gone(nmp)) {
3198 return ENXIO;
3199 }
3200
3201 if (vnode_isreg(vp)) {
3202 /*
3203 * We only need to ensure that a page-in will be
3204 * possible with these credentials. Everything
3205 * else has been checked at other layers.
3206 */
3207 naa.a_desc = &vnop_access_desc;
3208 naa.a_vp = vp;
3209 naa.a_action = KAUTH_VNODE_READ_DATA;
3210 naa.a_context = ctx;
3211
3212 /* compute actual success/failure based on accessibility */
3213 error = nfs_vnop_access(&naa);
3214 }
3215
3216 return error;
3217}
3218
3219int
3220nfs_vnop_mnomap(
3221 struct vnop_mnomap_args /* {
3222 * struct vnodeop_desc *a_desc;
3223 * vnode_t a_vp;
3224 * vfs_context_t a_context;
3225 * } */*ap)
3226{
3227 vfs_context_t ctx = ap->a_context;
3228 vnode_t vp = ap->a_vp;
3229 nfsnode_t np = VTONFS(vp);
3230 struct nfsmount *nmp;
3231 struct nfs_open_file *nofp = NULL;
3232 off_t size;
3233 int error;
3234 int is_mapped_flag = 0;
3235
3236 nmp = VTONMP(vp);
3237 if (nfs_mount_gone(nmp)) {
3238 return ENXIO;
3239 }
3240
3241 nfs_node_lock_force(np);
3242 if (np->n_flag & NISMAPPED) {
3243 is_mapped_flag = 1;
3244 np->n_flag &= ~NISMAPPED;
3245 }
3246 nfs_node_unlock(np);
3247 if (is_mapped_flag) {
3248 lck_mtx_lock(&nmp->nm_lock);
3249 if (nmp->nm_mappers) {
3250 nmp->nm_mappers--;
3251 } else {
3252 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3253 }
3254 lck_mtx_unlock(&nmp->nm_lock);
3255 }
3256
3257 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3258 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
3259 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
3260 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
3261 }
3262
3263 /* walk all open files and close all mmap opens */
3264loop:
3265 error = nfs_mount_state_in_use_start(nmp, NULL);
3266 if (error) {
3267 return error;
3268 }
3269 lck_mtx_lock(&np->n_openlock);
3270 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
3271 if (!nofp->nof_mmap_access) {
3272 continue;
3273 }
3274 lck_mtx_unlock(&np->n_openlock);
3275#if CONFIG_NFS4
3276 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3277 error = nfs4_reopen(nofp, NULL);
3278 if (!error) {
3279 nfs_mount_state_in_use_end(nmp, 0);
3280 goto loop;
3281 }
3282 }
3283#endif
3284 if (!error) {
3285 error = nfs_open_file_set_busy(nofp, NULL);
3286 }
3287 if (error) {
3288 lck_mtx_lock(&np->n_openlock);
3289 break;
3290 }
3291 if (nofp->nof_mmap_access) {
3292 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3293 if (!nfs_mount_state_error_should_restart(error)) {
3294 if (error) { /* not a state-operation-restarting error, so just clear the access */
3295 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3296 }
3297 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3298 }
3299 if (error) {
3300 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3301 }
3302 }
3303 nfs_open_file_clear_busy(nofp);
3304 nfs_mount_state_in_use_end(nmp, error);
3305 goto loop;
3306 }
3307 lck_mtx_unlock(&np->n_openlock);
3308 nfs_mount_state_in_use_end(nmp, error);
3309 return error;
3310}
3311
3312/*
3313 * Search a node's lock owner list for the owner for this process.
3314 * If not found and "alloc" is set, then allocate a new one.
3315 */
3316struct nfs_lock_owner *
3317nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3318{
3319 pid_t pid = proc_pid(p);
3320 struct nfs_lock_owner *nlop, *newnlop = NULL;
3321
3322tryagain:
3323 lck_mtx_lock(&np->n_openlock);
3324 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3325 os_ref_count_t newcount;
3326
3327 if (nlop->nlo_pid != pid) {
3328 continue;
3329 }
3330 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
3331 break;
3332 }
3333 /* stale lock owner... reuse it if we can */
3334 if (os_ref_get_count(&nlop->nlo_refcnt)) {
3335 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3336 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3337 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3338 lck_mtx_unlock(&np->n_openlock);
3339 goto tryagain;
3340 }
3341 nlop->nlo_pid_start = p->p_start;
3342 nlop->nlo_seqid = 0;
3343 nlop->nlo_stategenid = 0;
3344 break;
3345 }
3346
3347 if (!nlop && !newnlop && alloc) {
3348 lck_mtx_unlock(&np->n_openlock);
3349 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3350 if (!newnlop) {
3351 return NULL;
3352 }
3353 bzero(newnlop, sizeof(*newnlop));
3354 lck_mtx_init(&newnlop->nlo_lock, &nfs_open_grp, LCK_ATTR_NULL);
3355 newnlop->nlo_pid = pid;
3356 newnlop->nlo_pid_start = p->p_start;
3357 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3358 TAILQ_INIT(&newnlop->nlo_locks);
3359 goto tryagain;
3360 }
3361 if (!nlop && newnlop) {
3362 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3363 os_ref_init(&newnlop->nlo_refcnt, NULL);
3364 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3365 nlop = newnlop;
3366 }
3367 lck_mtx_unlock(&np->n_openlock);
3368
3369 if (newnlop && (nlop != newnlop)) {
3370 nfs_lock_owner_destroy(newnlop);
3371 }
3372
3373 if (nlop) {
3374 nfs_lock_owner_ref(nlop);
3375 }
3376
3377 return nlop;
3378}
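/*
 * Illustrative sketch (editorial addition, not compiled): lock owners are
 * keyed by pid plus process start time (so a recycled pid is not mistaken
 * for the old process).  As with open owners, a successful find must be
 * balanced with nfs_lock_owner_rele() (defined below).  The helper name
 * is hypothetical.
 */
#if 0
static void
example_lock_owner_lookup(nfsnode_t np, vfs_context_t ctx)
{
	struct nfs_lock_owner *nlop;

	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 1 /* alloc */);
	if (nlop) {
		/* ... use nlop's seqid/stateid for LOCK/LOCKU requests ... */
		nfs_lock_owner_rele(nlop);
	}
}
#endif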
3379
3380/*
3381 * destroy a lock owner that's no longer needed
3382 */
3383void
3384nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3385{
3386 if (nlop->nlo_open_owner) {
3387 nfs_open_owner_rele(nlop->nlo_open_owner);
3388 nlop->nlo_open_owner = NULL;
3389 }
3390 lck_mtx_destroy(&nlop->nlo_lock, &nfs_open_grp);
3391 FREE(nlop, M_TEMP);
3392}
3393
3394/*
3395 * acquire a reference count on a lock owner
3396 */
3397void
3398nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3399{
3400 lck_mtx_lock(&nlop->nlo_lock);
3401 os_ref_retain_locked(&nlop->nlo_refcnt);
3402 lck_mtx_unlock(&nlop->nlo_lock);
3403}
3404
3405/*
3406 * drop a reference count on a lock owner and destroy it if
3407 * it is no longer referenced and no longer on the node's list.
3408 */
3409void
3410nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3411{
3412 os_ref_count_t newcount;
3413
3414 lck_mtx_lock(&nlop->nlo_lock);
3415 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
3416 panic("nfs_lock_owner_rele: no refcnt");
3417 }
3418 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3419 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3420 panic("nfs_lock_owner_rele: busy");
3421 }
3422 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3423 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3424 lck_mtx_unlock(&nlop->nlo_lock);
3425 return;
3426 }
3427 /* owner is no longer referenced or linked to the node, so destroy it */
3428 lck_mtx_unlock(&nlop->nlo_lock);
3429 nfs_lock_owner_destroy(nlop);
3430}
3431
3432/*
3433 * Mark a lock owner as busy because we are about to
3434 * start an operation that uses and updates lock owner state.
3435 */
3436int
3437nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3438{
3439 struct nfsmount *nmp;
3440 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3441 int error = 0, slpflag;
3442
3443 nmp = nlop->nlo_open_owner->noo_mount;
3444 if (nfs_mount_gone(nmp)) {
3445 return ENXIO;
3446 }
3447 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3448
3449 lck_mtx_lock(&nlop->nlo_lock);
3450 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3451 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
3452 break;
3453 }
3454 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3455 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3456 slpflag = 0;
3457 }
3458 if (!error) {
3459 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3460 }
3461 lck_mtx_unlock(&nlop->nlo_lock);
3462
3463 return error;
3464}
3465
3466/*
3467 * Clear the busy flag on a lock owner and wake up anyone waiting
3468 * to mark it busy.
3469 */
3470void
3471nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3472{
3473 int wanted;
3474
3475 lck_mtx_lock(&nlop->nlo_lock);
3476 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3477 panic("nfs_lock_owner_clear_busy");
3478 }
3479 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3480 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3481 lck_mtx_unlock(&nlop->nlo_lock);
3482 if (wanted) {
3483 wakeup(nlop);
3484 }
3485}
3486
3487/*
3488 * Insert a held lock into a lock owner's sorted list.
3489 * (flock locks are always inserted at the head of the list)
3490 */
3491void
3492nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3493{
3494 struct nfs_file_lock *nflp;
3495
3496 /* insert new lock in lock owner's held lock list */
3497 lck_mtx_lock(&nlop->nlo_lock);
3498 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3499 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3500 } else {
3501 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3502 if (newnflp->nfl_start < nflp->nfl_start) {
3503 break;
3504 }
3505 }
3506 if (nflp) {
3507 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3508 } else {
3509 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3510 }
3511 }
3512 lck_mtx_unlock(&nlop->nlo_lock);
3513}
3514
3515/*
3516 * Get a file lock structure for this lock owner.
3517 */
3518struct nfs_file_lock *
3519nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3520{
3521 struct nfs_file_lock *nflp = NULL;
3522
3523 lck_mtx_lock(&nlop->nlo_lock);
3524 if (!nlop->nlo_alock.nfl_owner) {
3525 nflp = &nlop->nlo_alock;
3526 nflp->nfl_owner = nlop;
3527 }
3528 lck_mtx_unlock(&nlop->nlo_lock);
3529 if (!nflp) {
3530 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3531 if (!nflp) {
3532 return NULL;
3533 }
3534 bzero(nflp, sizeof(*nflp));
3535 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3536 nflp->nfl_owner = nlop;
3537 }
3538 nfs_lock_owner_ref(nlop);
3539 return nflp;
3540}
3541
3542/*
3543 * destroy the given NFS file lock structure
3544 */
3545void
3546nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3547{
3548 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3549
3550 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3551 nflp->nfl_owner = NULL;
3552 FREE(nflp, M_TEMP);
3553 } else {
3554 lck_mtx_lock(&nlop->nlo_lock);
3555 bzero(nflp, sizeof(*nflp));
3556 lck_mtx_unlock(&nlop->nlo_lock);
3557 }
3558 nfs_lock_owner_rele(nlop);
3559}
3560
3561/*
3562 * Check if one file lock conflicts with another.
3563 * (nflp1 is the new lock. nflp2 is the existing lock.)
3564 */
3565int
3566nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3567{
3568 /* no conflict if either lock is dead */
3569 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3570 return 0;
3571 }
3572 /* no conflict if it's ours - unless the lock style doesn't match */
3573 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3574 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
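/* a lock of a different type strictly inside our existing range will split the old lock in two */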
3575 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3576 (nflp1->nfl_start > nflp2->nfl_start) &&
3577 (nflp1->nfl_end < nflp2->nfl_end)) {
3578 *willsplit = 1;
3579 }
3580 return 0;
3581 }
3582 /* no conflict if ranges don't overlap */
3583 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3584 return 0;
3585 }
3586 /* no conflict if neither lock is exclusive */
3587 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3588 return 0;
3589 }
3590 /* conflict */
3591 return 1;
3592}
3593
3594#if CONFIG_NFS4
3595/*
3596 * Send an NFSv4 LOCK RPC to the server.
3597 */
3598int
3599nfs4_setlock_rpc(
3600 nfsnode_t np,
3601 struct nfs_open_file *nofp,
3602 struct nfs_file_lock *nflp,
3603 int reclaim,
3604 int flags,
3605 thread_t thd,
3606 kauth_cred_t cred)
3607{
3608 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3609 struct nfsmount *nmp;
3610 struct nfsm_chain nmreq, nmrep;
3611 uint64_t xid;
3612 uint32_t locktype;
3613 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3614 struct nfsreq_secinfo_args si;
3615
3616 nmp = NFSTONMP(np);
3617 if (nfs_mount_gone(nmp)) {
3618 return ENXIO;
3619 }
3620 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3621 return EINVAL;
3622 }
3623
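/*
 * A lock owner whose state generation doesn't match the mount's current
 * generation has no (valid) lock state on the server yet, so the LOCK
 * must be sent as a "new lock owner" request: that variant carries the
 * open stateid and open owner seqid instead of an existing lock stateid.
 */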
3624 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3625 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3626 ((nflp->nfl_type == F_WRLCK) ?
3627 NFS_LOCK_TYPE_WRITEW :
3628 NFS_LOCK_TYPE_READW) :
3629 ((nflp->nfl_type == F_WRLCK) ?
3630 NFS_LOCK_TYPE_WRITE :
3631 NFS_LOCK_TYPE_READ);
3632 if (newlocker) {
3633 error = nfs_open_file_set_busy(nofp, thd);
3634 if (error) {
3635 return error;
3636 }
3637 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3638 if (error) {
3639 nfs_open_file_clear_busy(nofp);
3640 return error;
3641 }
3642 if (!nlop->nlo_open_owner) {
3643 nfs_open_owner_ref(nofp->nof_owner);
3644 nlop->nlo_open_owner = nofp->nof_owner;
3645 }
3646 }
3647 error = nfs_lock_owner_set_busy(nlop, thd);
3648 if (error) {
3649 if (newlocker) {
3650 nfs_open_owner_clear_busy(nofp->nof_owner);
3651 nfs_open_file_clear_busy(nofp);
3652 }
3653 return error;
3654 }
3655
3656 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3657 nfsm_chain_null(&nmreq);
3658 nfsm_chain_null(&nmrep);
3659
3660 // PUTFH, GETATTR, LOCK
3661 numops = 3;
3662 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3663 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3664 numops--;
3665 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3666 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3667 numops--;
3668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3669 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3670 numops--;
3671 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3672 nfsm_chain_add_32(error, &nmreq, locktype);
3673 nfsm_chain_add_32(error, &nmreq, reclaim);
3674 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3675 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3676 nfsm_chain_add_32(error, &nmreq, newlocker);
3677 if (newlocker) {
3678 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3679 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3680 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3681 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3682 } else {
3683 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3684 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3685 }
3686 nfsm_chain_build_done(error, &nmreq);
3687 nfsm_assert(error, (numops == 0), EPROTO);
3688 nfsmout_if(error);
3689
3690 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3691
3692 if ((lockerror = nfs_node_lock(np))) {
3693 error = lockerror;
3694 }
3695 nfsm_chain_skip_tag(error, &nmrep);
3696 nfsm_chain_get_32(error, &nmrep, numops);
3697 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3698 nfsmout_if(error);
3699 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3700 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3701 nfsmout_if(error);
3702 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3703 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3704 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3705
3706 /* Update the lock owner's stategenid once it appears the server has state for it. */
3707 /* We determine this by noting the request was successful (we got a stateid). */
3708 if (newlocker && !error) {
3709 nlop->nlo_stategenid = nmp->nm_stategenid;
3710 }
3711nfsmout:
3712 if (!lockerror) {
3713 nfs_node_unlock(np);
3714 }
3715 nfs_lock_owner_clear_busy(nlop);
3716 if (newlocker) {
3717 nfs_open_owner_clear_busy(nofp->nof_owner);
3718 nfs_open_file_clear_busy(nofp);
3719 }
3720 nfsm_chain_cleanup(&nmreq);
3721 nfsm_chain_cleanup(&nmrep);
3722 return error;
3723}
3724
3725/*
3726 * Send an NFSv4 LOCKU RPC to the server.
3727 */
3728int
3729nfs4_unlock_rpc(
3730 nfsnode_t np,
3731 struct nfs_lock_owner *nlop,
3732 int type,
3733 uint64_t start,
3734 uint64_t end,
3735 int flags,
3736 thread_t thd,
3737 kauth_cred_t cred)
3738{
3739 struct nfsmount *nmp;
3740 struct nfsm_chain nmreq, nmrep;
3741 uint64_t xid;
3742 int error = 0, lockerror = ENOENT, numops, status;
3743 struct nfsreq_secinfo_args si;
3744
3745 nmp = NFSTONMP(np);
3746 if (nfs_mount_gone(nmp)) {
3747 return ENXIO;
3748 }
3749 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3750 return EINVAL;
3751 }
3752
3753 error = nfs_lock_owner_set_busy(nlop, NULL);
3754 if (error) {
3755 return error;
3756 }
3757
3758 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3759 nfsm_chain_null(&nmreq);
3760 nfsm_chain_null(&nmrep);
3761
3762 // PUTFH, GETATTR, LOCKU
3763 numops = 3;
3764 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3765 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3766 numops--;
3767 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3768 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3769 numops--;
3770 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3771 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3772 numops--;
3773 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3774 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3775 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3776 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3777 nfsm_chain_add_64(error, &nmreq, start);
3778 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3779 nfsm_chain_build_done(error, &nmreq);
3780 nfsm_assert(error, (numops == 0), EPROTO);
3781 nfsmout_if(error);
3782
3783 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3784
3785 if ((lockerror = nfs_node_lock(np))) {
3786 error = lockerror;
3787 }
3788 nfsm_chain_skip_tag(error, &nmrep);
3789 nfsm_chain_get_32(error, &nmrep, numops);
3790 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3791 nfsmout_if(error);
3792 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3793 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3794 nfsmout_if(error);
3795 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3796 nfs_owner_seqid_increment(NULL, nlop, error);
3797 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3798nfsmout:
3799 if (!lockerror) {
3800 nfs_node_unlock(np);
3801 }
3802 nfs_lock_owner_clear_busy(nlop);
3803 nfsm_chain_cleanup(&nmreq);
3804 nfsm_chain_cleanup(&nmrep);
3805 return error;
3806}
3807
3808/*
3809 * Send an NFSv4 LOCKT RPC to the server.
3810 */
3811int
3812nfs4_getlock_rpc(
3813 nfsnode_t np,
3814 struct nfs_lock_owner *nlop,
3815 struct flock *fl,
3816 uint64_t start,
3817 uint64_t end,
3818 vfs_context_t ctx)
3819{
3820 struct nfsmount *nmp;
3821 struct nfsm_chain nmreq, nmrep;
3822 uint64_t xid, val64 = 0;
3823 uint32_t val = 0;
3824 int error = 0, lockerror, numops, status;
3825 struct nfsreq_secinfo_args si;
3826
3827 nmp = NFSTONMP(np);
3828 if (nfs_mount_gone(nmp)) {
3829 return ENXIO;
3830 }
3831 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3832 return EINVAL;
3833 }
3834
3835 lockerror = ENOENT;
3836 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3837 nfsm_chain_null(&nmreq);
3838 nfsm_chain_null(&nmrep);
3839
3840 // PUTFH, GETATTR, LOCKT
3841 numops = 3;
3842 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3843 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3844 numops--;
3845 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3846 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3847 numops--;
3848 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3849 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3850 numops--;
3851 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3852 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3853 nfsm_chain_add_64(error, &nmreq, start);
3854 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3855 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3856 nfsm_chain_build_done(error, &nmreq);
3857 nfsm_assert(error, (numops == 0), EPROTO);
3858 nfsmout_if(error);
3859
3860 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3861
3862 if ((lockerror = nfs_node_lock(np))) {
3863 error = lockerror;
3864 }
3865 nfsm_chain_skip_tag(error, &nmrep);
3866 nfsm_chain_get_32(error, &nmrep, numops);
3867 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3868 nfsmout_if(error);
3869 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3870 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3871 nfsmout_if(error);
3872 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
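/*
 * For LOCKT, NFSERR_DENIED is not a failure: it means a conflicting lock
 * exists and the reply describes it (offset, length, type). A length of
 * UINT64_MAX means the conflict extends to end-of-file, reported as l_len 0.
 */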
3873 if (error == NFSERR_DENIED) {
3874 error = 0;
3875 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3876 nfsm_chain_get_64(error, &nmrep, val64);
3877 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3878 nfsm_chain_get_32(error, &nmrep, val);
3879 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3880 fl->l_pid = 0;
3881 fl->l_whence = SEEK_SET;
3882 } else if (!error) {
3883 fl->l_type = F_UNLCK;
3884 }
3885nfsmout:
3886 if (!lockerror) {
3887 nfs_node_unlock(np);
3888 }
3889 nfsm_chain_cleanup(&nmreq);
3890 nfsm_chain_cleanup(&nmrep);
3891 return error;
3892}
3893#endif /* CONFIG_NFS4 */
3894
3895/*
3896 * Check for any conflicts with the given lock.
3897 *
3898 * Checking for a lock doesn't require the file to be opened.
3899 * So we skip all the open owner, open file, lock owner work
3900 * and just check for a conflicting lock.
3901 */
3902int
3903nfs_advlock_getlock(
3904 nfsnode_t np,
3905 struct nfs_lock_owner *nlop,
3906 struct flock *fl,
3907 uint64_t start,
3908 uint64_t end,
3909 vfs_context_t ctx)
3910{
3911 struct nfsmount *nmp;
3912 struct nfs_file_lock *nflp;
3913 int error = 0, answered = 0;
3914
3915 nmp = NFSTONMP(np);
3916 if (nfs_mount_gone(nmp)) {
3917 return ENXIO;
3918 }
3919
3920restart:
3921 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3922 return error;
3923 }
3924
3925 lck_mtx_lock(&np->n_openlock);
3926 /* scan currently held locks for conflict */
3927 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3928 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
3929 continue;
3930 }
3931 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3932 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
3933 break;
3934 }
3935 }
3936 if (nflp) {
3937 /* found a conflicting lock */
3938 fl->l_type = nflp->nfl_type;
3939 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3940 fl->l_start = nflp->nfl_start;
3941 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3942 fl->l_whence = SEEK_SET;
3943 answered = 1;
3944 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3945 /*
3946 * If we have a write delegation, we know there can't be other
3947 * locks on the server. So the answer is no conflicting lock found.
3948 */
3949 fl->l_type = F_UNLCK;
3950 answered = 1;
3951 }
3952 lck_mtx_unlock(&np->n_openlock);
3953 if (answered) {
3954 nfs_mount_state_in_use_end(nmp, 0);
3955 return 0;
3956 }
3957
3958 /* no conflict found locally, so ask the server */
3959 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3960
3961 if (nfs_mount_state_in_use_end(nmp, error)) {
3962 goto restart;
3963 }
3964 return error;
3965}
3966
3967/*
3968 * Acquire a file lock for the given range.
3969 *
3970 * Add the lock (request) to the lock queue.
3971 * Scan the lock queue for any conflicting locks.
3972 * If a conflict is found, block or return an error.
3973 * Once end of queue is reached, send request to the server.
3974 * If the server grants the lock, scan the lock queue and
3975 * update any existing locks. Then (optionally) scan the
3976 * queue again to coalesce any locks adjacent to the new one.
3977 */
3978int
3979nfs_advlock_setlock(
3980 nfsnode_t np,
3981 struct nfs_open_file *nofp,
3982 struct nfs_lock_owner *nlop,
3983 int op,
3984 uint64_t start,
3985 uint64_t end,
3986 int style,
3987 short type,
3988 vfs_context_t ctx)
3989{
3990 struct nfsmount *nmp;
3991 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3992 struct nfs_file_lock *coalnflp;
3993 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3994 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
3995
3996 nmp = NFSTONMP(np);
3997 if (nfs_mount_gone(nmp)) {
3998 return ENXIO;
3999 }
4000 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4001
4002 if ((type != F_RDLCK) && (type != F_WRLCK)) {
4003 return EINVAL;
4004 }
4005
4006 /* allocate a new lock */
4007 newnflp = nfs_file_lock_alloc(nlop);
4008 if (!newnflp) {
4009 return ENOLCK;
4010 }
4011 newnflp->nfl_start = start;
4012 newnflp->nfl_end = end;
4013 newnflp->nfl_type = type;
4014 if (op == F_SETLKW) {
4015 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
4016 }
4017 newnflp->nfl_flags |= style;
4018 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
4019
4020 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
4021 /*
4022 * For exclusive flock-style locks, if we block waiting for the
4023 * lock, we need to first release any currently held shared
4024 * flock-style lock. So, the first thing we do is check if we
4025 * have a shared flock-style lock.
4026 */
4027 nflp = TAILQ_FIRST(&nlop->nlo_locks);
4028 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
4029 nflp = NULL;
4030 }
4031 if (nflp && (nflp->nfl_type != F_RDLCK)) {
4032 nflp = NULL;
4033 }
4034 flocknflp = nflp;
4035 }
4036
4037restart:
4038 restart = 0;
4039 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4040 if (error) {
4041 goto error_out;
4042 }
4043 inuse = 1;
4044 if (np->n_flag & NREVOKE) {
4045 error = EIO;
4046 nfs_mount_state_in_use_end(nmp, 0);
4047 inuse = 0;
4048 goto error_out;
4049 }
4050#if CONFIG_NFS4
4051 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4052 nfs_mount_state_in_use_end(nmp, 0);
4053 inuse = 0;
4054 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
4055 if (error) {
4056 goto error_out;
4057 }
4058 goto restart;
4059 }
4060#endif
4061
4062 lck_mtx_lock(&np->n_openlock);
4063 if (!inqueue) {
4064 /* insert new lock at beginning of list */
4065 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
4066 inqueue = 1;
4067 }
4068
4069 /* scan current list of locks (held and pending) for conflicts */
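/* (the new lock was inserted at the head of the list, so starting from TAILQ_NEXT(newnflp) visits every other lock exactly once) */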
4070 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
4071 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4072 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
4073 continue;
4074 }
4075 /* Conflict */
4076 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4077 error = EAGAIN;
4078 break;
4079 }
4080 /* Block until this lock is no longer held. */
4081 if (nflp->nfl_blockcnt == UINT_MAX) {
4082 error = ENOLCK;
4083 break;
4084 }
4085 nflp->nfl_blockcnt++;
4086 do {
4087 if (flocknflp) {
4088 /* release any currently held shared lock before sleeping */
4089 lck_mtx_unlock(&np->n_openlock);
4090 nfs_mount_state_in_use_end(nmp, 0);
4091 inuse = 0;
4092 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4093 flocknflp = NULL;
4094 if (!error) {
4095 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4096 }
4097 if (error) {
4098 lck_mtx_lock(&np->n_openlock);
4099 break;
4100 }
4101 inuse = 1;
4102 lck_mtx_lock(&np->n_openlock);
4103 /* no need to block/sleep if the conflict is gone */
4104 if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
4105 break;
4106 }
4107 }
4108 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
4109 slpflag = 0;
4110 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4111 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4112 /* looks like we have a recovery pending... restart */
4113 restart = 1;
4114 lck_mtx_unlock(&np->n_openlock);
4115 nfs_mount_state_in_use_end(nmp, 0);
4116 inuse = 0;
4117 lck_mtx_lock(&np->n_openlock);
4118 break;
4119 }
4120 if (!error && (np->n_flag & NREVOKE)) {
4121 error = EIO;
4122 }
4123 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
4124 nflp->nfl_blockcnt--;
4125 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
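/* dead locks linger while anyone is blocked on them; we were the last waiter, so free it now */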
4126 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4127 nfs_file_lock_destroy(nflp);
4128 }
4129 if (error || restart) {
4130 break;
4131 }
4132 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4133 /* So, start this lock-scanning loop over from where it started. */
4134 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
4135 }
4136 lck_mtx_unlock(&np->n_openlock);
4137 if (restart) {
4138 goto restart;
4139 }
4140 if (error) {
4141 goto error_out;
4142 }
4143
4144 if (willsplit) {
4145 /*
4146 * It looks like this operation is splitting a lock.
4147 * We allocate a new lock now so we don't have to worry
4148 * about the allocation failing after we've updated some state.
4149 */
4150 nflp2 = nfs_file_lock_alloc(nlop);
4151 if (!nflp2) {
4152 error = ENOLCK;
4153 goto error_out;
4154 }
4155 }
4156
4157 /* once scan for local conflicts is clear, send request to server */
4158 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
4159 goto error_out;
4160 }
4161 busy = 1;
4162 delay = 0;
4163 do {
4164#if CONFIG_NFS4
4165 /* do we have a delegation? (that we're not returning?) */
4166 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
4167 if (np->n_openflags & N_DELEG_WRITE) {
4168 /* with a write delegation, just take the lock delegated */
4169 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
4170 error = 0;
4171 /* make sure the lock owner knows its open owner */
4172 if (!nlop->nlo_open_owner) {
4173 nfs_open_owner_ref(nofp->nof_owner);
4174 nlop->nlo_open_owner = nofp->nof_owner;
4175 }
4176 break;
4177 } else {
4178 /*
4179 * If we don't have any non-delegated opens but we do have
4180 * delegated opens, then we need to first claim the delegated
4181 * opens so that the lock request on the server can be associated
4182 * with an open it knows about.
4183 */
4184 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
4185 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
4186 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
4187 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
4188 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
4189 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
4190 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
4191 if (error) {
4192 break;
4193 }
4194 }
4195 }
4196 }
4197#endif
4198 if (np->n_flag & NREVOKE) {
4199 error = EIO;
4200 }
4201 if (!error) {
4202 if (busy) {
4203 nfs_open_state_clear_busy(np);
4204 busy = 0;
4205 }
4206 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4207 if (!busy && !nfs_open_state_set_busy(np, vfs_context_thread(ctx))) {
4208 busy = 1;
4209 }
4210 }
4211 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
4212 break;
4213 }
4214 /* request was denied due to either conflict or grace period */
4215 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4216 error = EAGAIN;
4217 break;
4218 }
4219 if (flocknflp) {
4220 /* release any currently held shared lock before sleeping */
4221 nfs_open_state_clear_busy(np);
4222 busy = 0;
4223 if (inuse) {
4224 nfs_mount_state_in_use_end(nmp, 0);
4225 inuse = 0;
4226 }
4227 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4228 flocknflp = NULL;
4229 if (!error2) {
4230 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4231 }
4232 if (!error2) {
4233 inuse = 1;
4234 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
4235 }
4236 if (error2) {
4237 error = error2;
4238 break;
4239 }
4240 busy = 1;
4241 }
4242 /*
4243 * Wait a little bit and send the request again.
4244 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
4245 */
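/* (the delay ramps up one step per retry and we sleep delay * hz/2 ticks, so at most ~2 seconds; NFSERR_GRACE jumps straight to the 4-step cap) */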
4246 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
4247 if (error == NFSERR_GRACE) {
4248 delay = 4;
4249 }
4250 if (delay < 4) {
4251 delay++;
4252 }
4253 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
4254 slpflag = 0;
4255 }
4256 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4257 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4258 /* looks like we have a recovery pending... restart */
4259 nfs_open_state_clear_busy(np);
4260 busy = 0;
4261 if (inuse) {
4262 nfs_mount_state_in_use_end(nmp, 0);
4263 inuse = 0;
4264 }
4265 goto restart;
4266 }
4267 if (!error && (np->n_flag & NREVOKE)) {
4268 error = EIO;
4269 }
4270 } while (!error);
4271
4272error_out:
4273 if (nfs_mount_state_error_should_restart(error)) {
4274 /* looks like we need to restart this operation */
4275 if (busy) {
4276 nfs_open_state_clear_busy(np);
4277 busy = 0;
4278 }
4279 if (inuse) {
4280 nfs_mount_state_in_use_end(nmp, error);
4281 inuse = 0;
4282 }
4283 goto restart;
4284 }
4285 lck_mtx_lock(&np->n_openlock);
4286 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
4287 if (error) {
4288 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4289 if (newnflp->nfl_blockcnt) {
4290 /* wake up anyone blocked on this lock */
4291 wakeup(newnflp);
4292 } else {
4293 /* remove newnflp from lock list and destroy */
4294 if (inqueue) {
4295 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
4296 }
4297 nfs_file_lock_destroy(newnflp);
4298 }
4299 lck_mtx_unlock(&np->n_openlock);
4300 if (busy) {
4301 nfs_open_state_clear_busy(np);
4302 }
4303 if (inuse) {
4304 nfs_mount_state_in_use_end(nmp, error);
4305 }
4306 if (nflp2) {
4307 nfs_file_lock_destroy(nflp2);
4308 }
4309 return error;
4310 }
4311
4312 /* server granted the lock */
4313
4314 /*
4315 * Scan for locks to update.
4316 *
4317 * Locks completely covered are killed.
4318 * At most two locks may need to be clipped.
4319 * It's possible that a single lock may need to be split.
4320 */
4321 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4322 if (nflp == newnflp) {
4323 continue;
4324 }
4325 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4326 continue;
4327 }
4328 if (nflp->nfl_owner != nlop) {
4329 continue;
4330 }
4331 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
4332 continue;
4333 }
4334 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
4335 continue;
4336 }
4337 /* here's one to update */
4338 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
4339 /* The entire lock is being replaced. */
4340 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4341 lck_mtx_lock(&nlop->nlo_lock);
4342 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4343 lck_mtx_unlock(&nlop->nlo_lock);
4344 /* lock will be destroyed below, if no waiters */
4345 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
4346 /* We're replacing a range in the middle of a lock. */
4347 /* The current lock will be split into two locks. */
4348 /* Update locks and insert new lock after current lock. */
4349 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4350 nflp2->nfl_type = nflp->nfl_type;
4351 nflp2->nfl_start = newnflp->nfl_end + 1;
4352 nflp2->nfl_end = nflp->nfl_end;
4353 nflp->nfl_end = newnflp->nfl_start - 1;
4354 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
4355 nfs_lock_owner_insert_held_lock(nlop, nflp2);
4356 nextnflp = nflp2;
4357 nflp2 = NULL;
4358 } else if (newnflp->nfl_start > nflp->nfl_start) {
4359 /* We're replacing the end of a lock. */
4360 nflp->nfl_end = newnflp->nfl_start - 1;
4361 } else if (newnflp->nfl_end < nflp->nfl_end) {
4362 /* We're replacing the start of a lock. */
4363 nflp->nfl_start = newnflp->nfl_end + 1;
4364 }
4365 if (nflp->nfl_blockcnt) {
4366 /* wake up anyone blocked on this lock */
4367 wakeup(nflp);
4368 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4369 /* remove nflp from lock list and destroy */
4370 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4371 nfs_file_lock_destroy(nflp);
4372 }
4373 }
4374
4375 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4376
4377 /*
4378 * POSIX locks should be coalesced when possible.
4379 */
4380 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
4381 /*
4382 * Walk through the lock queue and check each of our held locks with
4383 * the previous and next locks in the lock owner's "held lock list".
4384 * If the two locks can be coalesced, we merge the current lock into
4385 * the other (previous or next) lock. Merging this way makes sure that
4386 * lock ranges are always merged forward in the lock queue. This is
4387 * important because anyone blocked on the lock being "merged away"
4388 * will still need to block on that range and it will simply continue
4389 * checking locks that are further down the list.
4390 */
4391 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4392 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4393 continue;
4394 }
4395 if (nflp->nfl_owner != nlop) {
4396 continue;
4397 }
4398 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
4399 continue;
4400 }
4401 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
4402 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4403 (coalnflp->nfl_type == nflp->nfl_type) &&
4404 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
4405 coalnflp->nfl_end = nflp->nfl_end;
4406 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4407 lck_mtx_lock(&nlop->nlo_lock);
4408 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4409 lck_mtx_unlock(&nlop->nlo_lock);
4410 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4411 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4412 (coalnflp->nfl_type == nflp->nfl_type) &&
4413 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4414 coalnflp->nfl_start = nflp->nfl_start;
4415 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4416 lck_mtx_lock(&nlop->nlo_lock);
4417 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4418 lck_mtx_unlock(&nlop->nlo_lock);
4419 }
4420 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
4421 continue;
4422 }
4423 if (nflp->nfl_blockcnt) {
4424 /* wake up anyone blocked on this lock */
4425 wakeup(nflp);
4426 } else {
4427 /* remove nflp from lock list and destroy */
4428 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4429 nfs_file_lock_destroy(nflp);
4430 }
4431 }
4432 }
4433
4434 lck_mtx_unlock(&np->n_openlock);
4435 nfs_open_state_clear_busy(np);
4436
4437 if (inuse) {
4438 nfs_mount_state_in_use_end(nmp, error);
4439 }
4440 if (nflp2) {
4441 nfs_file_lock_destroy(nflp2);
4442 }
4443 return error;
4444}
4445
4446/*
4447 * Release all (same style) locks within the given range.
4448 */
4449int
4450nfs_advlock_unlock(
4451 nfsnode_t np,
4452 struct nfs_open_file *nofp
4453#if !CONFIG_NFS4
4454 __unused
4455#endif
4456 ,
4457 struct nfs_lock_owner *nlop,
4458 uint64_t start,
4459 uint64_t end,
4460 int style,
4461 vfs_context_t ctx)
4462{
4463 struct nfsmount *nmp;
4464 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4465 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4466
4467 nmp = NFSTONMP(np);
4468 if (nfs_mount_gone(nmp)) {
4469 return ENXIO;
4470 }
4471
4472restart:
4473 if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
4474 return error;
4475 }
4476#if CONFIG_NFS4
4477 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4478 nfs_mount_state_in_use_end(nmp, 0);
4479 error = nfs4_reopen(nofp, NULL);
4480 if (error) {
4481 return error;
4482 }
4483 goto restart;
4484 }
4485#endif
4486 if ((error = nfs_open_state_set_busy(np, NULL))) {
4487 nfs_mount_state_in_use_end(nmp, error);
4488 return error;
4489 }
4490
4491 lck_mtx_lock(&np->n_openlock);
4492 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4493 /*
4494 * We may need to allocate a new lock if an existing lock gets split.
4495 * So, we first scan the list to check for a split, and if there's
4496 * going to be one, we'll allocate one now.
4497 */
4498 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4499 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4500 continue;
4501 }
4502 if (nflp->nfl_owner != nlop) {
4503 continue;
4504 }
4505 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4506 continue;
4507 }
4508 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4509 continue;
4510 }
4511 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4512 willsplit = 1;
4513 break;
4514 }
4515 }
4516 if (willsplit) {
4517 lck_mtx_unlock(&np->n_openlock);
4518 nfs_open_state_clear_busy(np);
4519 nfs_mount_state_in_use_end(nmp, 0);
4520 newnflp = nfs_file_lock_alloc(nlop);
4521 if (!newnflp) {
4522 return ENOMEM;
4523 }
4524 goto restart;
4525 }
4526 }
4527
4528 /*
4529 * Free all of our locks in the given range.
4530 *
4531 * Note that this process requires sending requests to the server.
4532 * Because of this, we will release the n_openlock while performing
4533 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4534 * locks from changing underneath us. However, other entries in the
4535 * list may be removed. So we need to be careful walking the list.
4536 */
4537
4538 /*
4539 * Don't unlock ranges that are held by other-style locks.
4540 * If style is POSIX, don't send any unlock RPCs if an flock lock is held.
4541 * If we unlock an flock, don't send unlock RPCs for any POSIX-style
4542 * ranges held - instead send unlocks for the ranges not held.
4543 */
4544 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4545 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4546 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
4547 send_unlock_rpcs = 0;
4548 }
4549 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4550 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4551 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4552 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4553 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4554 uint64_t s = 0;
4555 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4556 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4557 while (!delegated && nflp) {
4558 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4559 /* unlock the range preceding this lock */
4560 lck_mtx_unlock(&np->n_openlock);
4561 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
4562 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4563 if (nfs_mount_state_error_should_restart(error)) {
4564 nfs_open_state_clear_busy(np);
4565 nfs_mount_state_in_use_end(nmp, error);
4566 goto restart;
4567 }
4568 lck_mtx_lock(&np->n_openlock);
4569 if (error) {
4570 goto out;
4571 }
4572 s = nflp->nfl_end + 1;
4573 }
4574 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4575 }
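/* unlock whatever remains between the last held POSIX range and the end of the flock range */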
4576 if (!delegated) {
4577 lck_mtx_unlock(&np->n_openlock);
4578 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4579 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4580 if (nfs_mount_state_error_should_restart(error)) {
4581 nfs_open_state_clear_busy(np);
4582 nfs_mount_state_in_use_end(nmp, error);
4583 goto restart;
4584 }
4585 lck_mtx_lock(&np->n_openlock);
4586 if (error) {
4587 goto out;
4588 }
4589 }
4590 send_unlock_rpcs = 0;
4591 }
4592
4593 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4594 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4595 continue;
4596 }
4597 if (nflp->nfl_owner != nlop) {
4598 continue;
4599 }
4600 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4601 continue;
4602 }
4603 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4604 continue;
4605 }
4606 /* here's one to unlock */
4607 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4608 /* The entire lock is being unlocked. */
4609 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4610 lck_mtx_unlock(&np->n_openlock);
4611 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4612 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4613 if (nfs_mount_state_error_should_restart(error)) {
4614 nfs_open_state_clear_busy(np);
4615 nfs_mount_state_in_use_end(nmp, error);
4616 goto restart;
4617 }
4618 lck_mtx_lock(&np->n_openlock);
4619 }
4620 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4621 if (error) {
4622 break;
4623 }
4624 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4625 lck_mtx_lock(&nlop->nlo_lock);
4626 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4627 lck_mtx_unlock(&nlop->nlo_lock);
4628 /* lock will be destroyed below, if no waiters */
4629 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4630 /* We're unlocking a range in the middle of a lock. */
4631 /* The current lock will be split into two locks. */
4632 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4633 lck_mtx_unlock(&np->n_openlock);
4634 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4635 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4636 if (nfs_mount_state_error_should_restart(error)) {
4637 nfs_open_state_clear_busy(np);
4638 nfs_mount_state_in_use_end(nmp, error);
4639 goto restart;
4640 }
4641 lck_mtx_lock(&np->n_openlock);
4642 }
4643 if (error) {
4644 break;
4645 }
4646 /* update locks and insert new lock after current lock */
4647 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4648 newnflp->nfl_type = nflp->nfl_type;
4649 newnflp->nfl_start = end + 1;
4650 newnflp->nfl_end = nflp->nfl_end;
4651 nflp->nfl_end = start - 1;
4652 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4653 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4654 nextnflp = newnflp;
4655 newnflp = NULL;
4656 } else if (start > nflp->nfl_start) {
4657 /* We're unlocking the end of a lock. */
4658 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4659 lck_mtx_unlock(&np->n_openlock);
4660 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4661 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4662 if (nfs_mount_state_error_should_restart(error)) {
4663 nfs_open_state_clear_busy(np);
4664 nfs_mount_state_in_use_end(nmp, error);
4665 goto restart;
4666 }
4667 lck_mtx_lock(&np->n_openlock);
4668 }
4669 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4670 if (error) {
4671 break;
4672 }
4673 nflp->nfl_end = start - 1;
4674 } else if (end < nflp->nfl_end) {
4675 /* We're unlocking the start of a lock. */
4676 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4677 lck_mtx_unlock(&np->n_openlock);
4678 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4679 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4680 if (nfs_mount_state_error_should_restart(error)) {
4681 nfs_open_state_clear_busy(np);
4682 nfs_mount_state_in_use_end(nmp, error);
4683 goto restart;
4684 }
4685 lck_mtx_lock(&np->n_openlock);
4686 }
4687 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4688 if (error) {
4689 break;
4690 }
4691 nflp->nfl_start = end + 1;
4692 }
4693 if (nflp->nfl_blockcnt) {
4694 /* wake up anyone blocked on this lock */
4695 wakeup(nflp);
4696 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4697 /* remove nflp from lock list and destroy */
4698 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4699 nfs_file_lock_destroy(nflp);
4700 }
4701 }
4702out:
4703 lck_mtx_unlock(&np->n_openlock);
4704 nfs_open_state_clear_busy(np);
4705 nfs_mount_state_in_use_end(nmp, 0);
4706
4707 if (newnflp) {
4708 nfs_file_lock_destroy(newnflp);
4709 }
4710 return error;
4711}
4712
4713/*
4714 * NFS advisory file locking (the common entry point; v2/v3 requests come through here too)
4715 */
4716int
4717nfs_vnop_advlock(
4718 struct vnop_advlock_args /* {
4719 * struct vnodeop_desc *a_desc;
4720 * vnode_t a_vp;
4721 * caddr_t a_id;
4722 * int a_op;
4723 * struct flock *a_fl;
4724 * int a_flags;
4725 * vfs_context_t a_context;
4726 * } */*ap)
4727{
4728 vnode_t vp = ap->a_vp;
4729 nfsnode_t np = VTONFS(ap->a_vp);
4730 struct flock *fl = ap->a_fl;
4731 int op = ap->a_op;
4732 int flags = ap->a_flags;
4733 vfs_context_t ctx = ap->a_context;
4734 struct nfsmount *nmp;
4735 struct nfs_open_owner *noop = NULL;
4736 struct nfs_open_file *nofp = NULL;
4737 struct nfs_lock_owner *nlop = NULL;
4738 off_t lstart;
4739 uint64_t start, end;
4740 int error = 0, modified, style;
4741 enum vtype vtype;
4742#define OFF_MAX QUAD_MAX
4743
4744 nmp = VTONMP(ap->a_vp);
4745 if (nfs_mount_gone(nmp)) {
4746 return ENXIO;
4747 }
4748 lck_mtx_lock(&nmp->nm_lock);
4749 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4750 lck_mtx_unlock(&nmp->nm_lock);
4751 return ENOTSUP;
4752 }
4753 lck_mtx_unlock(&nmp->nm_lock);
4754
4755 if (np->n_flag & NREVOKE) {
4756 return EIO;
4757 }
4758 vtype = vnode_vtype(ap->a_vp);
4759 if (vtype == VDIR) { /* ignore lock requests on directories */
4760 return 0;
4761 }
4762 if (vtype != VREG) { /* anything other than regular files is invalid */
4763 return EINVAL;
4764 }
4765
4766 /* Convert the flock structure into a start and end. */
4767 switch (fl->l_whence) {
4768 case SEEK_SET:
4769 case SEEK_CUR:
4770 /*
4771 * Caller is responsible for adding any necessary offset
4772 * to fl->l_start when SEEK_CUR is used.
4773 */
4774 lstart = fl->l_start;
4775 break;
4776 case SEEK_END:
4777 /* need to flush and refetch attributes to make */
4778 /* sure we have the correct end-of-file offset */
4779 if ((error = nfs_node_lock(np))) {
4780 return error;
4781 }
4782 modified = (np->n_flag & NMODIFIED);
4783 nfs_node_unlock(np);
4784 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
4785 return error;
4786 }
4787 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4788 return error;
4789 }
4790 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4791 if ((np->n_size > OFF_MAX) ||
4792 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
4793 error = EOVERFLOW;
4794 }
4795 lstart = np->n_size + fl->l_start;
4796 nfs_data_unlock(np);
4797 if (error) {
4798 return error;
4799 }
4800 break;
4801 default:
4802 return EINVAL;
4803 }
4804 if (lstart < 0) {
4805 return EINVAL;
4806 }
4807 start = lstart;
4808 if (fl->l_len == 0) {
4809 end = UINT64_MAX;
4810 } else if (fl->l_len > 0) {
4811 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4812 return EOVERFLOW;
4813 }
4814 end = start - 1 + fl->l_len;
4815 } else { /* l_len is negative */
4816 if ((lstart + fl->l_len) < 0) {
4817 return EINVAL;
4818 }
4819 end = start - 1;
4820 start += fl->l_len;
4821 }
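/*
 * Examples: l_start=100, l_len=10 locks bytes [100..109];
 * l_start=100, l_len=-10 locks bytes [90..99];
 * l_len=0 locks from l_start through end-of-file (end = UINT64_MAX).
 */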
4822 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4823 return EINVAL;
4824 }
4825
4826 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4827 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4828 return EINVAL;
4829 }
4830
4831 /* find the lock owner, allocating one unless this is an unlock */
4832 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4833 if (!nlop) {
4834 error = (op == F_UNLCK) ? 0 : ENOMEM;
4835 if (error) {
4836 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4837 }
4838 goto out;
4839 }
4840
4841 if (op == F_GETLK) {
4842 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4843 } else {
4844 /* find the open owner */
4845 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4846 if (!noop) {
4847 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4848 error = EPERM;
4849 goto out;
4850 }
4851 /* find the open file */
4852#if CONFIG_NFS4
4853restart:
4854#endif
4855 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4856 if (error) {
4857 error = EBADF;
4858 }
4859 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4860 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4861 error = EIO;
4862 }
4863#if CONFIG_NFS4
4864 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4865 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4866 nofp = NULL;
4867 if (!error) {
4868 goto restart;
4869 }
4870 }
4871#endif
4872 if (error) {
4873 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4874 goto out;
4875 }
4876 if (op == F_UNLCK) {
4877 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4878 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4879 if ((op == F_SETLK) && (flags & F_WAIT)) {
4880 op = F_SETLKW;
4881 }
4882 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4883 } else {
4884 /* not getlk, unlock or lock? */
4885 error = EINVAL;
4886 }
4887 }
4888
4889out:
4890 if (nlop) {
4891 nfs_lock_owner_rele(nlop);
4892 }
4893 if (noop) {
4894 nfs_open_owner_rele(noop);
4895 }
4896 return error;
4897}
4898
4899/*
4900 * Check if an open owner holds any locks on a file.
4901 */
4902int
4903nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4904{
4905 struct nfs_lock_owner *nlop;
4906
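/* stop at the first lock owner that belongs to this open owner and still holds locks */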
4907 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4908 if (nlop->nlo_open_owner != noop) {
4909 continue;
4910 }
4911 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
4912 break;
4913 }
4914 }
4915 return nlop ? 1 : 0;
4916}
4917
4918#if CONFIG_NFS4
4919/*
4920 * Reopen simple (no deny, no locks) open state that was lost.
4921 */
4922int
4923nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4924{
4925 struct nfs_open_owner *noop = nofp->nof_owner;
4926 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4927 nfsnode_t np = nofp->nof_np;
4928 vnode_t vp = NFSTOV(np);
4929 vnode_t dvp = NULL;
4930 struct componentname cn;
4931 const char *vname = NULL;
4932 const char *name = NULL;
4933 uint32_t namelen;
4934 char smallname[128];
4935 char *filename = NULL;
4936 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4937 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4938
4939 lck_mtx_lock(&nofp->nof_lock);
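/* NFS_OPEN_FILE_REOPENING serializes reopens: wait out any reopen already in progress */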
4940 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4941 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
4942 break;
4943 }
4944 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
4945 slpflag = 0;
4946 }
4947 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4948 lck_mtx_unlock(&nofp->nof_lock);
4949 return error;
4950 }
4951 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4952 lck_mtx_unlock(&nofp->nof_lock);
4953
4954 nfs_node_lock_force(np);
4955 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4956 /*
4957 * The node's been sillyrenamed, so we need to use
4958 * the sillyrename directory/name to do the open.
4959 */
4960 struct nfs_sillyrename *nsp = np->n_sillyrename;
4961 dvp = NFSTOV(nsp->nsr_dnp);
4962 if ((error = vnode_get(dvp))) {
4963 dvp = NULLVP;
4964 nfs_node_unlock(np);
4965 goto out;
4966 }
4967 name = nsp->nsr_name;
4968 } else {
4969 /*
4970 * [sigh] We can't trust VFS to get the parent right for named
4971 * attribute nodes. (It likes to reparent the nodes after we've
4972 * created them.) Luckily we can probably get the right parent
4973 * from the n_parent we have stashed away.
4974 */
4975 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4976 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
4977 dvp = NULL;
4978 }
4979 if (!dvp) {
4980 dvp = vnode_getparent(vp);
4981 }
4982 vname = vnode_getname(vp);
4983 if (!dvp || !vname) {
4984 if (!error) {
4985 error = EIO;
4986 }
4987 nfs_node_unlock(np);
4988 goto out;
4989 }
4990 name = vname;
4991 }
4992 filename = &smallname[0];
4993 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4994 if (namelen >= sizeof(smallname)) {
4995 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
4996 if (!filename) {
4997 error = ENOMEM;
4998 goto out;
4999 }
5000 snprintf(filename, namelen + 1, "%s", name);
5001 }
5002 nfs_node_unlock(np);
5003 bzero(&cn, sizeof(cn));
5004 cn.cn_nameptr = filename;
5005 cn.cn_namelen = namelen;
5006
5007restart:
5008 done = 0;
5009 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
5010 goto out;
5011 }
5012
5013 if (nofp->nof_rw) {
5014 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
5015 }
5016 if (!error && nofp->nof_w) {
5017 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
5018 }
5019 if (!error && nofp->nof_r) {
5020 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
5021 }
5022
5023 if (nfs_mount_state_in_use_end(nmp, error)) {
5024 if (error == NFSERR_GRACE) {
5025 goto restart;
5026 }
5027 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
5028 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5029 error = 0;
5030 goto out;
5031 }
5032 done = 1;
5033out:
5034 if (error && (error != EINTR) && (error != ERESTART)) {
5035 nfs_revoke_open_state_for_node(np);
5036 }
5037 lck_mtx_lock(&nofp->nof_lock);
5038 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
5039 if (done) {
5040 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5041 } else if (error) {
5042 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
5043 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5044 }
5045 lck_mtx_unlock(&nofp->nof_lock);
5046 if (filename && (filename != &smallname[0])) {
5047 FREE(filename, M_TEMP);
5048 }
5049 if (vname) {
5050 vnode_putname(vname);
5051 }
5052 if (dvp != NULLVP) {
5053 vnode_put(dvp);
5054 }
5055 return error;
5056}
5057
5058/*
5059 * Send a normal OPEN RPC to open/create a file.
5060 */
5061int
5062nfs4_open_rpc(
5063 struct nfs_open_file *nofp,
5064 vfs_context_t ctx,
5065 struct componentname *cnp,
5066 struct vnode_attr *vap,
5067 vnode_t dvp,
5068 vnode_t *vpp,
5069 int create,
5070 int share_access,
5071 int share_deny)
5072{
5073 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5074 cnp, vap, dvp, vpp, create, share_access, share_deny);
5075}
5076
5077/*
5078 * Send an OPEN RPC to reopen a file.
5079 */
5080int
5081nfs4_open_reopen_rpc(
5082 struct nfs_open_file *nofp,
5083 thread_t thd,
5084 kauth_cred_t cred,
5085 struct componentname *cnp,
5086 vnode_t dvp,
5087 vnode_t *vpp,
5088 int share_access,
5089 int share_deny)
5090{
5091 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
5092}
5093
5094/*
5095 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5096 */
5097int
5098nfs4_open_confirm_rpc(
5099 struct nfsmount *nmp,
5100 nfsnode_t dnp,
5101 u_char *fhp,
5102 int fhlen,
5103 struct nfs_open_owner *noop,
5104 nfs_stateid *sid,
5105 thread_t thd,
5106 kauth_cred_t cred,
5107 struct nfs_vattr *nvap,
5108 uint64_t *xidp)
5109{
5110 struct nfsm_chain nmreq, nmrep;
5111 int error = 0, status, numops;
5112 struct nfsreq_secinfo_args si;
5113
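/* OPEN_CONFIRM is sent when the server's OPEN reply asked for confirmation (NFS_OPEN_RESULT_CONFIRM in the result flags) */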
5114 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5115 nfsm_chain_null(&nmreq);
5116 nfsm_chain_null(&nmrep);
5117
5118 // PUTFH, OPEN_CONFIRM, GETATTR
5119 numops = 3;
5120 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5121 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
5122 numops--;
5123 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5124 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5125 numops--;
5126 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5127 nfsm_chain_add_stateid(error, &nmreq, sid);
5128 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5129 numops--;
5130 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5131 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5132 nfsm_chain_build_done(error, &nmreq);
5133 nfsm_assert(error, (numops == 0), EPROTO);
5134 nfsmout_if(error);
5135 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5136
5137 nfsm_chain_skip_tag(error, &nmrep);
5138 nfsm_chain_get_32(error, &nmrep, numops);
5139 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5140 nfsmout_if(error);
5141 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5142 nfs_owner_seqid_increment(noop, NULL, error);
5143 nfsm_chain_get_stateid(error, &nmrep, sid);
5144 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5145 nfsmout_if(error);
5146 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5147nfsmout:
5148 nfsm_chain_cleanup(&nmreq);
5149 nfsm_chain_cleanup(&nmrep);
5150 return error;
5151}
5152
5153/*
5154 * common OPEN RPC code
5155 *
5156 * If create is set, ctx must be passed in.
5157 * On success, returns a node in *vpp if none was passed in.
5158 */
5159int
5160nfs4_open_rpc_internal(
5161 struct nfs_open_file *nofp,
5162 vfs_context_t ctx,
5163 thread_t thd,
5164 kauth_cred_t cred,
5165 struct componentname *cnp,
5166 struct vnode_attr *vap,
5167 vnode_t dvp,
5168 vnode_t *vpp,
5169 int create,
5170 int share_access,
5171 int share_deny)
5172{
5173 struct nfsmount *nmp;
5174 struct nfs_open_owner *noop = nofp->nof_owner;
5175 struct nfs_vattr *nvattr;
5176 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
5177 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
5178 u_int64_t xid, savedxid = 0;
5179 nfsnode_t dnp = VTONFS(dvp);
5180 nfsnode_t np, newnp = NULL;
5181 vnode_t newvp = NULL;
5182 struct nfsm_chain nmreq, nmrep;
5183 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5184 uint32_t rflags, delegation, recall;
5185 struct nfs_stateid stateid, dstateid, *sid;
5186 fhandle_t *fh;
5187 struct nfsreq *req;
5188 struct nfs_dulookup *dul;
5189 char sbuf[64], *s;
5190 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5191 struct kauth_ace ace;
5192 struct nfsreq_secinfo_args si;
5193
5194 if (create && !ctx) {
5195 return EINVAL;
5196 }
5197
5198 nmp = VTONMP(dvp);
5199 if (nfs_mount_gone(nmp)) {
5200 return ENXIO;
5201 }
5202 nfsvers = nmp->nm_vers;
5203 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5204 bzero(&dstateid, sizeof(dstateid));
5205 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5206 return EINVAL;
5207 }
5208
5209 np = *vpp ? VTONFS(*vpp) : NULL;
5210 if (create && vap) {
5211 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5212 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5213 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5214 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5215 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
5216 vap->va_vaflags |= VA_UTIMES_NULL;
5217 }
5218 } else {
5219 exclusive = gotuid = gotgid = 0;
5220 }
5221 if (nofp) {
5222 sid = &nofp->nof_stateid;
5223 } else {
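/* no open file to hold the stateid, so receive the OPEN reply's stateid into a zeroed scratch */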
5224 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5225 sid = &stateid;
5226 }
5227
5228 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5229 return error;
5230 }
5231
5232 fh = zalloc(nfs_fhandle_zone);
5233 req = zalloc(nfs_req_zone);
5234 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
5235 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5236
5237again:
5238 rflags = delegation = recall = 0;
5239 ace.ace_flags = 0;
5240 s = sbuf;
5241 slen = sizeof(sbuf);
5242 NVATTR_INIT(nvattr);
5243 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
5244
5245 nfsm_chain_null(&nmreq);
5246 nfsm_chain_null(&nmrep);
5247
5248 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5249 numops = 6;
5250 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
5251 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
5252 numops--;
5253 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5254 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5255 numops--;
5256 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5257 numops--;
5258 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5259 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5260 nfsm_chain_add_32(error, &nmreq, share_access);
5261 nfsm_chain_add_32(error, &nmreq, share_deny);
5262 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
5263 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5264 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
5265 nfsm_chain_add_32(error, &nmreq, create);
5266 if (create) {
5267 if (exclusive) {
5268 static uint32_t create_verf; // XXX need a better verifier
5269 create_verf++;
5270 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5271 /* insert 64 bit verifier */
5272 nfsm_chain_add_32(error, &nmreq, create_verf);
5273 nfsm_chain_add_32(error, &nmreq, create_verf);
5274 } else {
5275 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5276 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5277 }
5278 }
5279 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
5280 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5281 numops--;
5282 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5283 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5284 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5285 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5286 numops--;
5287 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5288 numops--;
5289 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5290 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5291 nfsm_chain_build_done(error, &nmreq);
5292 nfsm_assert(error, (numops == 0), EPROTO);
5293 if (!error) {
5294 error = busyerror = nfs_node_set_busy(dnp, thd);
5295 }
5296 nfsmout_if(error);
5297
5298 if (create && !namedattrs) {
5299 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5300 }
5301
5302 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
5303 if (!error) {
5304 if (create && !namedattrs) {
5305 nfs_dulookup_start(dul, dnp, ctx);
5306 }
5307 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5308 savedxid = xid;
5309 }
5310
5311 if (create && !namedattrs) {
5312 nfs_dulookup_finish(dul, dnp, ctx);
5313 }
5314
5315 if ((lockerror = nfs_node_lock(dnp))) {
5316 error = lockerror;
5317 }
5318 nfsm_chain_skip_tag(error, &nmrep);
5319 nfsm_chain_get_32(error, &nmrep, numops);
5320 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5321 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5322 nfsmout_if(error);
5323 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5324 nfs_owner_seqid_increment(noop, NULL, error);
5325 nfsm_chain_get_stateid(error, &nmrep, sid);
5326 nfsm_chain_check_change_info(error, &nmrep, dnp);
5327 nfsm_chain_get_32(error, &nmrep, rflags);
5328 bmlen = NFS_ATTR_BITMAP_LEN;
5329 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5330 nfsm_chain_get_32(error, &nmrep, delegation);
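 /*
  * The reply's open_delegation4 is a switched union: the delegation
  * type, then (for read/write delegations) the delegation stateid, a
  * recall flag, a space limit (write delegations only), and an nfsace4
  * naming who may open the file locally under the delegation.
  */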
5331 if (!error) {
5332 switch (delegation) {
5333 case NFS_OPEN_DELEGATE_NONE:
5334 break;
5335 case NFS_OPEN_DELEGATE_READ:
5336 case NFS_OPEN_DELEGATE_WRITE:
5337 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5338 nfsm_chain_get_32(error, &nmrep, recall);
5339 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4) XXX
5340 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5341 }
5342 /* if we have any trouble accepting the ACE, just invalidate it */
5343 ace_type = ace_flags = ace_mask = len = 0;
5344 nfsm_chain_get_32(error, &nmrep, ace_type);
5345 nfsm_chain_get_32(error, &nmrep, ace_flags);
5346 nfsm_chain_get_32(error, &nmrep, ace_mask);
5347 nfsm_chain_get_32(error, &nmrep, len);
5348 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5349 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5350 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5351 if (!error && (len >= slen)) {
5352 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5353 if (s) {
5354 slen = len + 1;
5355 } else {
5356 ace.ace_flags = 0;
5357 }
5358 }
5359 if (s) {
5360 nfsm_chain_get_opaque(error, &nmrep, len, s);
5361 } else {
5362 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5363 }
5364 if (!error && s) {
5365 s[len] = '\0';
5366 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5367 ace.ace_flags = 0;
5368 }
5369 }
5370 if (error || !s) {
5371 ace.ace_flags = 0;
5372 }
5373 if (s && (s != sbuf)) {
5374 FREE(s, M_TEMP);
5375 }
5376 break;
5377 default:
5378 error = EBADRPC;
5379 break;
5380 }
5381 }
5382 /* At this point if we have no error, the object was created/opened. */
5383 open_error = error;
5384 nfsmout_if(error);
5385 if (create && vap && !exclusive) {
5386 nfs_vattr_set_supported(bitmap, vap);
5387 }
5388 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5389 nfsmout_if(error);
5390 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5391 nfsmout_if(error);
5392 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5393 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
5394 error = EBADRPC;
5395 goto nfsmout;
5396 }
5397 if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5398 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5399 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
5400 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5401 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
5402 }
5403 }
5404 /* directory attributes: if we don't get them, make sure to invalidate */
5405 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5406 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5407 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
5408 if (error) {
5409 NATTRINVALIDATE(dnp);
5410 }
5411 nfsmout_if(error);
5412
5413 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5414 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5415 }
5416
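 /*
  * An NFSv4.0 server may require confirmation of an open owner's first
  * open; the open isn't usable until OPEN_CONFIRM completes, so drop
  * the directory lock and confirm before relying on the stateid.
  */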
5417 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5418 nfs_node_unlock(dnp);
5419 lockerror = ENOENT;
5420 NVATTR_CLEANUP(nvattr);
5421 error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid);
5422 nfsmout_if(error);
5423 savedxid = xid;
5424 if ((lockerror = nfs_node_lock(dnp))) {
5425 error = lockerror;
5426 }
5427 }
5428
5429nfsmout:
5430 nfsm_chain_cleanup(&nmreq);
5431 nfsm_chain_cleanup(&nmrep);
5432
5433 if (!lockerror && create) {
5434 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5435 dnp->n_flag &= ~NNEGNCENTRIES;
5436 cache_purge_negatives(dvp);
5437 }
5438 dnp->n_flag |= NMODIFIED;
5439 nfs_node_unlock(dnp);
5440 lockerror = ENOENT;
5441 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
5442 }
5443 if (!lockerror) {
5444 nfs_node_unlock(dnp);
5445 }
5446 if (!error && !np && fh->fh_len) {
5447 /* create the vnode with the filehandle and attributes */
5448 xid = savedxid;
5449 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp);
5450 if (!error) {
5451 newvp = NFSTOV(newnp);
5452 }
5453 }
5454 NVATTR_CLEANUP(nvattr);
5455 if (!busyerror) {
5456 nfs_node_clear_busy(dnp);
5457 }
5458 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5459 if (!np) {
5460 np = newnp;
5461 }
5462 if (!error && np && !recall) {
5463 /* stuff the delegation state in the node */
5464 lck_mtx_lock(&np->n_openlock);
5465 np->n_openflags &= ~N_DELEG_MASK;
5466 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5467 np->n_dstateid = dstateid;
5468 np->n_dace = ace;
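 /*
  * NFSNOLIST marks a node not yet on the mount's delegation list;
  * re-check under nm_lock since another thread may have queued it
  * between the two tests.
  */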
5469 if (np->n_dlink.tqe_next == NFSNOLIST) {
5470 lck_mtx_lock(&nmp->nm_lock);
5471 if (np->n_dlink.tqe_next == NFSNOLIST) {
5472 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5473 }
5474 lck_mtx_unlock(&nmp->nm_lock);
5475 }
5476 lck_mtx_unlock(&np->n_openlock);
5477 } else {
5478 /* give the delegation back */
5479 if (np) {
5480 if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5481 /* update delegation state and return it */
5482 lck_mtx_lock(&np->n_openlock);
5483 np->n_openflags &= ~N_DELEG_MASK;
5484 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5485 np->n_dstateid = dstateid;
5486 np->n_dace = ace;
5487 if (np->n_dlink.tqe_next == NFSNOLIST) {
5488 lck_mtx_lock(&nmp->nm_lock);
5489 if (np->n_dlink.tqe_next == NFSNOLIST) {
5490 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5491 }
5492 lck_mtx_unlock(&nmp->nm_lock);
5493 }
5494 lck_mtx_unlock(&np->n_openlock);
5495 /* don't need to send a separate delegreturn for fh */
5496 fh->fh_len = 0;
5497 }
5498 /* return np's current delegation */
5499 nfs4_delegation_return(np, 0, thd, cred);
5500 }
5501 if (fh->fh_len) { /* return fh's delegation if it wasn't for np */
5502 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
5503 }
5504 }
5505 }
5506 if (error) {
5507 if (exclusive && (error == NFSERR_NOTSUPP)) {
5508 exclusive = 0;
5509 goto again;
5510 }
5511 if (newvp) {
5512 nfs_node_unlock(newnp);
5513 vnode_put(newvp);
5514 }
5515 } else if (create) {
5516 nfs_node_unlock(newnp);
5517 if (exclusive) {
5518 error = nfs4_setattr_rpc(newnp, vap, ctx);
5519 if (error && (gotuid || gotgid)) {
5520 /* it's possible the server didn't like our attempt to set IDs, */
5521 /* so try it again without them */
5522 VATTR_CLEAR_ACTIVE(vap, va_uid);
5523 VATTR_CLEAR_ACTIVE(vap, va_gid);
5524 error = nfs4_setattr_rpc(newnp, vap, ctx);
5525 }
5526 }
5527 if (error) {
5528 vnode_put(newvp);
5529 } else {
5530 *vpp = newvp;
5531 }
5532 }
5533 nfs_open_owner_clear_busy(noop);
5534 NFS_ZFREE(nfs_fhandle_zone, fh);
5535 NFS_ZFREE(nfs_req_zone, req);
5536 FREE(dul, M_TEMP);
5537 FREE(nvattr, M_TEMP);
5538 return error;
5539}
5540
5541
5542/*
5543 * Send an OPEN RPC to claim a delegated open for a file
5544 */
5545int
5546nfs4_claim_delegated_open_rpc(
5547 struct nfs_open_file *nofp,
5548 int share_access,
5549 int share_deny,
5550 int flags)
5551{
5552 struct nfsmount *nmp;
5553 struct nfs_open_owner *noop = nofp->nof_owner;
5554 struct nfs_vattr *nvattr;
5555 int error = 0, lockerror = ENOENT, status;
5556 int nfsvers, numops;
5557 u_int64_t xid;
5558 nfsnode_t np = nofp->nof_np;
5559 struct nfsm_chain nmreq, nmrep;
5560 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5561 uint32_t rflags = 0, delegation, recall = 0;
5562 fhandle_t *fh;
5563 struct nfs_stateid dstateid;
5564 char sbuf[64], *s = sbuf;
5565 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5566 struct kauth_ace ace;
5567 vnode_t dvp = NULL;
5568 const char *vname = NULL;
5569 const char *name = NULL;
5570 uint32_t namelen;
5571 char smallname[128];
5572 char *filename = NULL;
5573 struct nfsreq_secinfo_args si;
5574
5575 nmp = NFSTONMP(np);
5576 if (nfs_mount_gone(nmp)) {
5577 return ENXIO;
5578 }
5579 fh = zalloc(nfs_fhandle_zone);
5580 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5581 nfsvers = nmp->nm_vers;
5582
5583 nfs_node_lock_force(np);
5584 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5585 /*
5586 * The node's been sillyrenamed, so we need to use
5587 * the sillyrename directory/name to do the open.
5588 */
5589 struct nfs_sillyrename *nsp = np->n_sillyrename;
5590 dvp = NFSTOV(nsp->nsr_dnp);
5591 if ((error = vnode_get(dvp))) {
5592 dvp = NULLVP;
5593 nfs_node_unlock(np);
5594 goto out;
5595 }
5596 name = nsp->nsr_name;
5597 } else {
5598 /*
5599 * [sigh] We can't trust VFS to get the parent right for named
5600 * attribute nodes. (It likes to reparent the nodes after we've
5601 * created them.) Luckily we can probably get the right parent
5602 * from the n_parent we have stashed away.
5603 */
5604 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5605 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5606 dvp = NULL;
5607 }
5608 if (!dvp) {
5609 dvp = vnode_getparent(NFSTOV(np));
5610 }
5611 vname = vnode_getname(NFSTOV(np));
5612 if (!dvp || !vname) {
5613 if (!error) {
5614 error = EIO;
5615 }
5616 nfs_node_unlock(np);
5617 goto out;
5618 }
5619 name = vname;
5620 }
5621 filename = &smallname[0];
5622 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5623 if (namelen >= sizeof(smallname)) {
5624 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
5625 if (!filename) {
5626 error = ENOMEM;
5627 nfs_node_unlock(np);
5628 goto out;
5629 }
5630 snprintf(filename, namelen + 1, "%s", name);
5631 }
5632 nfs_node_unlock(np);
5633
5634 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5635 goto out;
5636 }
5637 NVATTR_INIT(nvattr);
5638 delegation = NFS_OPEN_DELEGATE_NONE;
5639 dstateid = np->n_dstateid;
5640 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5641
5642 nfsm_chain_null(&nmreq);
5643 nfsm_chain_null(&nmrep);
5644
5645 // PUTFH, OPEN, GETATTR(FH)
5646 numops = 3;
5647 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5648 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5649 numops--;
5650 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5651 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5652 numops--;
5653 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5654 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5655 nfsm_chain_add_32(error, &nmreq, share_access);
5656 nfsm_chain_add_32(error, &nmreq, share_deny);
5657 // open owner: clientid + uid
5658 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5659 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5660 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5661 // openflag4
5662 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
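 /*
  * CLAIM_DELEGATE_CUR converts an open performed locally under the
  * delegation into real open state on the server, identified by the
  * current delegation stateid plus the component name.
  */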
5663 // open_claim4
5664 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5665 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5666 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5667 numops--;
5668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5669 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5670 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5671 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5672 nfsm_chain_build_done(error, &nmreq);
5673 nfsm_assert(error, (numops == 0), EPROTO);
5674 nfsmout_if(error);
5675
5676 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5677 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
5678
5679 if ((lockerror = nfs_node_lock(np))) {
5680 error = lockerror;
5681 }
5682 nfsm_chain_skip_tag(error, &nmrep);
5683 nfsm_chain_get_32(error, &nmrep, numops);
5684 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5685 nfsmout_if(error);
5686 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5687 nfs_owner_seqid_increment(noop, NULL, error);
5688 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5689 nfsm_chain_check_change_info(error, &nmrep, np);
5690 nfsm_chain_get_32(error, &nmrep, rflags);
5691 bmlen = NFS_ATTR_BITMAP_LEN;
5692 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5693 nfsm_chain_get_32(error, &nmrep, delegation);
5694 if (!error) {
5695 switch (delegation) {
5696 case NFS_OPEN_DELEGATE_NONE:
5697 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5698 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5699 break;
5700 case NFS_OPEN_DELEGATE_READ:
5701 case NFS_OPEN_DELEGATE_WRITE:
5702 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5703 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5704 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5705 (delegation == NFS_OPEN_DELEGATE_READ))) {
5706 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5707 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5708 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5709 }
5710 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5711 nfsm_chain_get_32(error, &nmrep, recall);
5712 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4) XXX
5713 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5714 }
5715 /* if we have any trouble accepting the ACE, just invalidate it */
5716 ace_type = ace_flags = ace_mask = len = 0;
5717 nfsm_chain_get_32(error, &nmrep, ace_type);
5718 nfsm_chain_get_32(error, &nmrep, ace_flags);
5719 nfsm_chain_get_32(error, &nmrep, ace_mask);
5720 nfsm_chain_get_32(error, &nmrep, len);
5721 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5722 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5723 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5724 if (!error && (len >= slen)) {
5725 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5726 if (s) {
5727 slen = len + 1;
5728 } else {
5729 ace.ace_flags = 0;
5730 }
5731 }
5732 if (s) {
5733 nfsm_chain_get_opaque(error, &nmrep, len, s);
5734 } else {
5735 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5736 }
5737 if (!error && s) {
5738 s[len] = '\0';
5739 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5740 ace.ace_flags = 0;
5741 }
5742 }
5743 if (error || !s) {
5744 ace.ace_flags = 0;
5745 }
5746 if (s && (s != sbuf)) {
5747 FREE(s, M_TEMP);
5748 }
5749 if (!error) {
5750 /* stuff the latest delegation state in the node */
5751 lck_mtx_lock(&np->n_openlock);
5752 np->n_openflags &= ~N_DELEG_MASK;
5753 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5754 np->n_dstateid = dstateid;
5755 np->n_dace = ace;
5756 if (np->n_dlink.tqe_next == NFSNOLIST) {
5757 lck_mtx_lock(&nmp->nm_lock);
5758 if (np->n_dlink.tqe_next == NFSNOLIST) {
5759 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5760 }
5761 lck_mtx_unlock(&nmp->nm_lock);
5762 }
5763 lck_mtx_unlock(&np->n_openlock);
5764 }
5765 break;
5766 default:
5767 error = EBADRPC;
5768 break;
5769 }
5770 }
5771 nfsmout_if(error);
5772 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5773 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5774 nfsmout_if(error);
5775 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5776 printf("nfs: open delegated claim didn't return filehandle? %s\n", filename ? filename : "???");
5777 error = EBADRPC;
5778 goto nfsmout;
5779 }
5780 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5781 // XXX what if fh doesn't match the vnode we think we're re-opening?
5782 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
5783 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5784 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5785 }
5786 }
5787 error = nfs_loadattrcache(np, nvattr, &xid, 1);
5788 nfsmout_if(error);
5789 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5790 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5791 }
5792nfsmout:
5793 NVATTR_CLEANUP(nvattr);
5794 FREE(nvattr, M_TEMP);
5795 NFS_ZFREE(nfs_fhandle_zone, fh);
5796 nfsm_chain_cleanup(&nmreq);
5797 nfsm_chain_cleanup(&nmrep);
5798 if (!lockerror) {
5799 nfs_node_unlock(np);
5800 }
5801 nfs_open_owner_clear_busy(noop);
5802 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5803 if (recall) {
5804 /*
5805 * We're making a delegated claim.
5806 * Don't return the delegation here in case we have more to claim.
5807 * Just make sure it's queued up to be returned.
5808 */
5809 nfs4_delegation_return_enqueue(np);
5810 }
5811 }
5812out:
5813 // if (!error)
5814 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5815 if (filename && (filename != &smallname[0])) {
5816 FREE(filename, M_TEMP);
5817 }
5818 if (vname) {
5819 vnode_putname(vname);
5820 }
5821 if (dvp != NULLVP) {
5822 vnode_put(dvp);
5823 }
5824 return error;
5825}
5826
5827/*
5828 * Send an OPEN RPC to reclaim an open file.
5829 */
5830int
5831nfs4_open_reclaim_rpc(
5832 struct nfs_open_file *nofp,
5833 int share_access,
5834 int share_deny)
5835{
5836 struct nfsmount *nmp;
5837 struct nfs_open_owner *noop = nofp->nof_owner;
5838 struct nfs_vattr *nvattr;
5839 int error = 0, lockerror = ENOENT, status;
5840 int nfsvers, numops;
5841 u_int64_t xid;
5842 nfsnode_t np = nofp->nof_np;
5843 struct nfsm_chain nmreq, nmrep;
5844 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5845 uint32_t rflags = 0, delegation, recall = 0;
5846 fhandle_t *fh;
5847 struct nfs_stateid dstateid;
5848 char sbuf[64], *s = sbuf;
5849 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5850 struct kauth_ace ace;
5851 struct nfsreq_secinfo_args si;
5852
5853 nmp = NFSTONMP(np);
5854 if (nfs_mount_gone(nmp)) {
5855 return ENXIO;
5856 }
5857 nfsvers = nmp->nm_vers;
5858
5859 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5860 return error;
5861 }
5862
5863 fh = zalloc(nfs_fhandle_zone);
5864 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5865 NVATTR_INIT(nvattr);
5866 delegation = NFS_OPEN_DELEGATE_NONE;
5867 dstateid = np->n_dstateid;
5868 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5869
5870 nfsm_chain_null(&nmreq);
5871 nfsm_chain_null(&nmrep);
5872
5873 // PUTFH, OPEN, GETATTR(FH)
5874 numops = 3;
5875 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5876 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5877 numops--;
5878 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5879 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5880 numops--;
5881 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5882 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5883 nfsm_chain_add_32(error, &nmreq, share_access);
5884 nfsm_chain_add_32(error, &nmreq, share_deny);
5885 // open owner: clientid + uid
5886 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5887 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5888 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5889 // openflag4
5890 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
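 /*
  * CLAIM_PREVIOUS reclaims state held before a server restart: we
  * assert the delegation type we held so the server can reinstate it
  * along with the open.
  */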
5891 // open_claim4
5892 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5893 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5894 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5895 NFS_OPEN_DELEGATE_NONE;
5896 nfsm_chain_add_32(error, &nmreq, delegation);
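 /* reset 'delegation' -- it's reused below to hold the type returned in the reply */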
5897 delegation = NFS_OPEN_DELEGATE_NONE;
5898 numops--;
5899 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5900 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5901 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5902 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5903 nfsm_chain_build_done(error, &nmreq);
5904 nfsm_assert(error, (numops == 0), EPROTO);
5905 nfsmout_if(error);
5906
5907 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5908 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5909
5910 if ((lockerror = nfs_node_lock(np))) {
5911 error = lockerror;
5912 }
5913 nfsm_chain_skip_tag(error, &nmrep);
5914 nfsm_chain_get_32(error, &nmrep, numops);
5915 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5916 nfsmout_if(error);
5917 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5918 nfs_owner_seqid_increment(noop, NULL, error);
5919 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5920 nfsm_chain_check_change_info(error, &nmrep, np);
5921 nfsm_chain_get_32(error, &nmrep, rflags);
5922 bmlen = NFS_ATTR_BITMAP_LEN;
5923 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5924 nfsm_chain_get_32(error, &nmrep, delegation);
5925 if (!error) {
5926 switch (delegation) {
5927 case NFS_OPEN_DELEGATE_NONE:
5928 if (np->n_openflags & N_DELEG_MASK) {
5929 /*
5930 * Hey! We were supposed to get our delegation back even
5931 * if it was getting immediately recalled. Bad server!
5932 *
5933 * Just try to return the existing delegation.
5934 */
5935 // NP(np, "nfs: open reclaim didn't return delegation?");
5936 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5937 recall = 1;
5938 }
5939 break;
5940 case NFS_OPEN_DELEGATE_READ:
5941 case NFS_OPEN_DELEGATE_WRITE:
5942 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5943 nfsm_chain_get_32(error, &nmrep, recall);
5944 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4) XXX
5945 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5946 }
5947 /* if we have any trouble accepting the ACE, just invalidate it */
5948 ace_type = ace_flags = ace_mask = len = 0;
5949 nfsm_chain_get_32(error, &nmrep, ace_type);
5950 nfsm_chain_get_32(error, &nmrep, ace_flags);
5951 nfsm_chain_get_32(error, &nmrep, ace_mask);
5952 nfsm_chain_get_32(error, &nmrep, len);
5953 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5954 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5955 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5956 if (!error && (len >= slen)) {
5957 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5958 if (s) {
5959 slen = len + 1;
5960 } else {
5961 ace.ace_flags = 0;
5962 }
5963 }
5964 if (s) {
5965 nfsm_chain_get_opaque(error, &nmrep, len, s);
5966 } else {
5967 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5968 }
5969 if (!error && s) {
5970 s[len] = '\0';
5971 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5972 ace.ace_flags = 0;
5973 }
5974 }
5975 if (error || !s) {
5976 ace.ace_flags = 0;
5977 }
5978 if (s && (s != sbuf)) {
5979 FREE(s, M_TEMP);
5980 }
5981 if (!error) {
5982 /* stuff the delegation state in the node */
5983 lck_mtx_lock(&np->n_openlock);
5984 np->n_openflags &= ~N_DELEG_MASK;
5985 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5986 np->n_dstateid = dstateid;
5987 np->n_dace = ace;
5988 if (np->n_dlink.tqe_next == NFSNOLIST) {
5989 lck_mtx_lock(&nmp->nm_lock);
5990 if (np->n_dlink.tqe_next == NFSNOLIST) {
5991 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5992 }
5993 lck_mtx_unlock(&nmp->nm_lock);
5994 }
5995 lck_mtx_unlock(&np->n_openlock);
5996 }
5997 break;
5998 default:
5999 error = EBADRPC;
6000 break;
6001 }
6002 }
6003 nfsmout_if(error);
6004 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6005 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
6006 nfsmout_if(error);
6007 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6008 NP(np, "nfs: open reclaim didn't return filehandle?");
6009 error = EBADRPC;
6010 goto nfsmout;
6011 }
6012 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6013 // XXX what if fh doesn't match the vnode we think we're re-opening?
6014 // That should be pretty hard in this case, given that we are doing
6015 // the open reclaim using the file handle (and not a dir/name pair).
6016 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
6017 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6018 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
6019 }
6020 }
6021 error = nfs_loadattrcache(np, nvattr, &xid, 1);
6022 nfsmout_if(error);
6023 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6024 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6025 }
6026nfsmout:
6027 // if (!error)
6028 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
6029 NVATTR_CLEANUP(nvattr);
6030 FREE(nvattr, M_TEMP);
6031 NFS_ZFREE(nfs_fhandle_zone, fh);
6032 nfsm_chain_cleanup(&nmreq);
6033 nfsm_chain_cleanup(&nmrep);
6034 if (!lockerror) {
6035 nfs_node_unlock(np);
6036 }
6037 nfs_open_owner_clear_busy(noop);
6038 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
6039 if (recall) {
6040 nfs4_delegation_return_enqueue(np);
6041 }
6042 }
6043 return error;
6044}
6045
6046int
6047nfs4_open_downgrade_rpc(
6048 nfsnode_t np,
6049 struct nfs_open_file *nofp,
6050 vfs_context_t ctx)
6051{
6052 struct nfs_open_owner *noop = nofp->nof_owner;
6053 struct nfsmount *nmp;
6054 int error, lockerror = ENOENT, status, nfsvers, numops;
6055 struct nfsm_chain nmreq, nmrep;
6056 u_int64_t xid;
6057 struct nfsreq_secinfo_args si;
6058
6059 nmp = NFSTONMP(np);
6060 if (nfs_mount_gone(nmp)) {
6061 return ENXIO;
6062 }
6063 nfsvers = nmp->nm_vers;
6064
6065 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6066 return error;
6067 }
6068
6069 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6070 nfsm_chain_null(&nmreq);
6071 nfsm_chain_null(&nmrep);
6072
6073 // PUTFH, OPEN_DOWNGRADE, GETATTR
6074 numops = 3;
6075 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6076 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
6077 numops--;
6078 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6079 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6080 numops--;
6081 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
6082 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6083 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6084 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
6085 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
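 /*
  * OPEN_DOWNGRADE replaces the share access/deny with the (reduced)
  * modes still in use; nof_access/nof_deny reflect the remaining opens.
  */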
6086 numops--;
6087 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6088 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6089 nfsm_chain_build_done(error, &nmreq);
6090 nfsm_assert(error, (numops == 0), EPROTO);
6091 nfsmout_if(error);
6092 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6093 vfs_context_thread(ctx), vfs_context_ucred(ctx),
6094 &si, R_NOINTR, &nmrep, &xid, &status);
6095
6096 if ((lockerror = nfs_node_lock(np))) {
6097 error = lockerror;
6098 }
6099 nfsm_chain_skip_tag(error, &nmrep);
6100 nfsm_chain_get_32(error, &nmrep, numops);
6101 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6102 nfsmout_if(error);
6103 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6104 nfs_owner_seqid_increment(noop, NULL, error);
6105 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6106 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6107 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6108nfsmout:
6109 if (!lockerror) {
6110 nfs_node_unlock(np);
6111 }
6112 nfs_open_owner_clear_busy(noop);
6113 nfsm_chain_cleanup(&nmreq);
6114 nfsm_chain_cleanup(&nmrep);
6115 return error;
6116}
6117
6118int
6119nfs4_close_rpc(
6120 nfsnode_t np,
6121 struct nfs_open_file *nofp,
6122 thread_t thd,
6123 kauth_cred_t cred,
6124 int flags)
6125{
6126 struct nfs_open_owner *noop = nofp->nof_owner;
6127 struct nfsmount *nmp;
6128 int error, lockerror = ENOENT, status, nfsvers, numops;
6129 struct nfsm_chain nmreq, nmrep;
6130 u_int64_t xid;
6131 struct nfsreq_secinfo_args si;
6132
6133 nmp = NFSTONMP(np);
6134 if (nfs_mount_gone(nmp)) {
6135 return ENXIO;
6136 }
6137 nfsvers = nmp->nm_vers;
6138
6139 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6140 return error;
6141 }
6142
6143 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6144 nfsm_chain_null(&nmreq);
6145 nfsm_chain_null(&nmrep);
6146
6147 // PUTFH, CLOSE, GETATTR
6148 numops = 3;
6149 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6150 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
6151 numops--;
6152 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6153 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6154 numops--;
6155 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
6156 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6157 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6158 numops--;
6159 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6160 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6161 nfsm_chain_build_done(error, &nmreq);
6162 nfsm_assert(error, (numops == 0), EPROTO);
6163 nfsmout_if(error);
6164 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6165
6166 if ((lockerror = nfs_node_lock(np))) {
6167 error = lockerror;
6168 }
6169 nfsm_chain_skip_tag(error, &nmrep);
6170 nfsm_chain_get_32(error, &nmrep, numops);
6171 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6172 nfsmout_if(error);
6173 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
6174 nfs_owner_seqid_increment(noop, NULL, error);
6175 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6176 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6177 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6178nfsmout:
6179 if (!lockerror) {
6180 nfs_node_unlock(np);
6181 }
6182 nfs_open_owner_clear_busy(noop);
6183 nfsm_chain_cleanup(&nmreq);
6184 nfsm_chain_cleanup(&nmrep);
6185 return error;
6186}
6187
6188
6189/*
6190 * Claim the delegated open combinations this open file holds.
6191 */
6192int
6193nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
6194{
6195 struct nfs_open_owner *noop = nofp->nof_owner;
6196 struct nfs_lock_owner *nlop;
6197 struct nfs_file_lock *nflp, *nextnflp;
6198 struct nfsmount *nmp;
6199 int error = 0, reopen = 0;
6200
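 /*
  * For each share mode below: claim the delegated opens with an OPEN
  * RPC, then fold the delegated count (nof_d_*) into the corresponding
  * confirmed count (nof_*) under the open file's lock.
  */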
6201 if (nofp->nof_d_rw_drw) {
6202 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6203 if (!error) {
6204 lck_mtx_lock(&nofp->nof_lock);
6205 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6206 nofp->nof_d_rw_drw = 0;
6207 lck_mtx_unlock(&nofp->nof_lock);
6208 }
6209 }
6210 if (!error && nofp->nof_d_w_drw) {
6211 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6212 if (!error) {
6213 lck_mtx_lock(&nofp->nof_lock);
6214 nofp->nof_w_drw += nofp->nof_d_w_drw;
6215 nofp->nof_d_w_drw = 0;
6216 lck_mtx_unlock(&nofp->nof_lock);
6217 }
6218 }
6219 if (!error && nofp->nof_d_r_drw) {
6220 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6221 if (!error) {
6222 lck_mtx_lock(&nofp->nof_lock);
6223 nofp->nof_r_drw += nofp->nof_d_r_drw;
6224 nofp->nof_d_r_drw = 0;
6225 lck_mtx_unlock(&nofp->nof_lock);
6226 }
6227 }
6228 if (!error && nofp->nof_d_rw_dw) {
6229 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6230 if (!error) {
6231 lck_mtx_lock(&nofp->nof_lock);
6232 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6233 nofp->nof_d_rw_dw = 0;
6234 lck_mtx_unlock(&nofp->nof_lock);
6235 }
6236 }
6237 if (!error && nofp->nof_d_w_dw) {
6238 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6239 if (!error) {
6240 lck_mtx_lock(&nofp->nof_lock);
6241 nofp->nof_w_dw += nofp->nof_d_w_dw;
6242 nofp->nof_d_w_dw = 0;
6243 lck_mtx_unlock(&nofp->nof_lock);
6244 }
6245 }
6246 if (!error && nofp->nof_d_r_dw) {
6247 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6248 if (!error) {
6249 lck_mtx_lock(&nofp->nof_lock);
6250 nofp->nof_r_dw += nofp->nof_d_r_dw;
6251 nofp->nof_d_r_dw = 0;
6252 lck_mtx_unlock(&nofp->nof_lock);
6253 }
6254 }
6255 /* non-deny-mode opens may be reopened if no locks are held */
6256 if (!error && nofp->nof_d_rw) {
6257 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6258 /* for some errors, we should just try reopening the file */
6259 if (nfs_mount_state_error_delegation_lost(error)) {
6260 reopen = error;
6261 }
6262 if (!error || reopen) {
6263 lck_mtx_lock(&nofp->nof_lock);
6264 nofp->nof_rw += nofp->nof_d_rw;
6265 nofp->nof_d_rw = 0;
6266 lck_mtx_unlock(&nofp->nof_lock);
6267 }
6268 }
6269 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6270 if ((!error || reopen) && nofp->nof_d_w) {
6271 if (!error) {
6272 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6273 /* for some errors, we should just try reopening the file */
6274 if (nfs_mount_state_error_delegation_lost(error)) {
6275 reopen = error;
6276 }
6277 }
6278 if (!error || reopen) {
6279 lck_mtx_lock(&nofp->nof_lock);
6280 nofp->nof_w += nofp->nof_d_w;
6281 nofp->nof_d_w = 0;
6282 lck_mtx_unlock(&nofp->nof_lock);
6283 }
6284 }
6285 if ((!error || reopen) && nofp->nof_d_r) {
6286 if (!error) {
6287 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6288 /* for some errors, we should just try reopening the file */
6289 if (nfs_mount_state_error_delegation_lost(error)) {
6290 reopen = error;
6291 }
6292 }
6293 if (!error || reopen) {
6294 lck_mtx_lock(&nofp->nof_lock);
6295 nofp->nof_r += nofp->nof_d_r;
6296 nofp->nof_d_r = 0;
6297 lck_mtx_unlock(&nofp->nof_lock);
6298 }
6299 }
6300
6301 if (reopen) {
6302 /*
6303 * Any problems with the delegation probably indicate that we
6304 * should review/return all of our current delegation state.
6305 */
6306 if ((nmp = NFSTONMP(nofp->nof_np))) {
6307 nfs4_delegation_return_enqueue(nofp->nof_np);
6308 lck_mtx_lock(&nmp->nm_lock);
6309 nfs_need_recover(nmp, NFSERR_EXPIRED);
6310 lck_mtx_unlock(&nmp->nm_lock);
6311 }
6312 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6313 /* just reopen the file on next access */
6314 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6315 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6316 lck_mtx_lock(&nofp->nof_lock);
6317 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6318 lck_mtx_unlock(&nofp->nof_lock);
6319 return 0;
6320 }
6321 if (reopen) {
6322 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6323 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6324 }
6325 }
6326
6327 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6328 /* claim delegated locks */
6329 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
6330 if (nlop->nlo_open_owner != noop) {
6331 continue;
6332 }
6333 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6334 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6335 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6336 continue;
6337 }
6338 /* skip non-delegated locks */
6339 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6340 continue;
6341 }
6342 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6343 if (error) {
6344 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6345 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6346 break;
6347 }
6348 // else {
6349 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6350 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6351 // }
6352 }
6353 if (error) {
6354 break;
6355 }
6356 }
6357 }
6358
6359 if (!error) { /* all state claimed successfully! */
6360 return 0;
6361 }
6362
6363 /* restart if it looks like a problem more than just losing the delegation */
6364 if (!nfs_mount_state_error_delegation_lost(error) &&
6365 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6366 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6367 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6368 nfs_need_reconnect(nmp);
6369 }
6370 return error;
6371 }
6372
6373 /* delegated state lost (once held but now not claimable) */
6374 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6375
6376 /*
6377 * Any problems with the delegation probably indicate that we
6378 * should review/return all of our current delegation state.
6379 */
6380 if ((nmp = NFSTONMP(nofp->nof_np))) {
6381 nfs4_delegation_return_enqueue(nofp->nof_np);
6382 lck_mtx_lock(&nmp->nm_lock);
6383 nfs_need_recover(nmp, NFSERR_EXPIRED);
6384 lck_mtx_unlock(&nmp->nm_lock);
6385 }
6386
6387 /* revoke all open file state */
6388 nfs_revoke_open_state_for_node(nofp->nof_np);
6389
6390 return error;
6391}
6392#endif /* CONFIG_NFS4 */
6393
6394/*
6395 * Release all open state for the given node.
6396 */
6397void
6398nfs_release_open_state_for_node(nfsnode_t np, int force)
6399{
6400 struct nfsmount *nmp = NFSTONMP(np);
6401 struct nfs_open_file *nofp;
6402 struct nfs_file_lock *nflp, *nextnflp;
6403
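 /*
  * When 'force' is set (or the mount is gone), skip the on-the-wire
  * unlock/close RPCs and just tear the state down locally.
  */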
6404 /* drop held locks */
6405 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6406 /* skip dead & blocked lock requests */
6407 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6408 continue;
6409 }
6410 /* send an unlock if not a delegated lock */
6411 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6412 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
6413 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6414 }
6415 /* kill/remove the lock */
6416 lck_mtx_lock(&np->n_openlock);
6417 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6418 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6419 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6420 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6421 if (nflp->nfl_blockcnt) {
6422 /* wake up anyone blocked on this lock */
6423 wakeup(nflp);
6424 } else {
6425 /* remove nflp from lock list and destroy */
6426 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6427 nfs_file_lock_destroy(nflp);
6428 }
6429 lck_mtx_unlock(&np->n_openlock);
6430 }
6431
6432 lck_mtx_lock(&np->n_openlock);
6433
6434 /* drop all opens */
6435 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6436 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6437 continue;
6438 }
6439 /* mark open state as lost */
6440 lck_mtx_lock(&nofp->nof_lock);
6441 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6442 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
6443
6444 lck_mtx_unlock(&nofp->nof_lock);
6445#if CONFIG_NFS4
6446 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6447 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
6448 }
6449#endif
6450 }
6451
6452 lck_mtx_unlock(&np->n_openlock);
6453}
6454
6455/*
6456 * State for a node has been lost; drop it and revoke the node.
6457 * Attempt to return any state if possible in case the server
6458 * might somehow think we hold it.
6459 */
6460void
6461nfs_revoke_open_state_for_node(nfsnode_t np)
6462{
6463 struct nfsmount *nmp;
6464
6465 /* mark node as needing to be revoked */
6466 nfs_node_lock_force(np);
6467 if (np->n_flag & NREVOKE) { /* already revoked? */
6468 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6469 nfs_node_unlock(np);
6470 return;
6471 }
6472 np->n_flag |= NREVOKE;
6473 nfs_node_unlock(np);
6474
6475 nfs_release_open_state_for_node(np, 0);
6476 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6477
6478 /* mark mount as needing a revoke scan and have the socket thread do it. */
6479 if ((nmp = NFSTONMP(np))) {
6480 lck_mtx_lock(&nmp->nm_lock);
6481 nmp->nm_state |= NFSSTA_REVOKE;
6482 nfs_mount_sock_thread_wake(nmp);
6483 lck_mtx_unlock(&nmp->nm_lock);
6484 }
6485}
6486
6487#if CONFIG_NFS4
6488/*
6489 * Claim the delegated open combinations that each of this node's open files holds.
6490 */
6491int
6492nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6493{
6494 struct nfs_open_file *nofp;
6495 int error = 0;
6496
6497 lck_mtx_lock(&np->n_openlock);
6498
6499 /* walk the open file list looking for opens with delegated state to claim */
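 /*
  * n_openlock is dropped around each claim, so the list may change;
  * after a successful claim, rescan from the head.
  */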
6500restart:
6501 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6502 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6503 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6504 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6505 continue;
6506 }
6507 lck_mtx_unlock(&np->n_openlock);
6508 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6509 lck_mtx_lock(&np->n_openlock);
6510 if (error) {
6511 break;
6512 }
6513 goto restart;
6514 }
6515
6516 lck_mtx_unlock(&np->n_openlock);
6517
6518 return error;
6519}
6520
6521/*
6522 * Mark a node as needing to have its delegation returned.
6523 * Queue it up on the delegation return queue.
6524 * Make sure the thread is running.
6525 */
6526void
6527nfs4_delegation_return_enqueue(nfsnode_t np)
6528{
6529 struct nfsmount *nmp;
6530
6531 nmp = NFSTONMP(np);
6532 if (nfs_mount_gone(nmp)) {
6533 return;
6534 }
6535
6536 lck_mtx_lock(&np->n_openlock);
6537 np->n_openflags |= N_DELEG_RETURN;
6538 lck_mtx_unlock(&np->n_openlock);
6539
6540 lck_mtx_lock(&nmp->nm_lock);
6541 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6542 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
6543 }
6544 nfs_mount_sock_thread_wake(nmp);
6545 lck_mtx_unlock(&nmp->nm_lock);
6546}
6547
6548/*
6549 * return any delegation we may have for the given node
6550 */
6551int
6552nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6553{
6554 struct nfsmount *nmp;
6555 fhandle_t *fh;
6556 nfs_stateid dstateid;
6557 int error;
6558
6559 nmp = NFSTONMP(np);
6560 if (nfs_mount_gone(nmp)) {
6561 return ENXIO;
6562 }
6563
6564 fh = zalloc(nfs_fhandle_zone);
6565
6566 /* first, make sure the node's marked for delegation return */
6567 lck_mtx_lock(&np->n_openlock);
6568 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6569 lck_mtx_unlock(&np->n_openlock);
6570
6571 /* make sure nobody else is using the delegation state */
6572 if ((error = nfs_open_state_set_busy(np, NULL))) {
6573 goto out;
6574 }
6575
6576 /* claim any delegated state */
6577 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6578 goto out;
6579 }
6580
6581 /* return the delegation */
6582 lck_mtx_lock(&np->n_openlock);
6583 dstateid = np->n_dstateid;
6584 fh->fh_len = np->n_fhsize;
6585 bcopy(np->n_fhp, fh->fh_data, fh->fh_len);
6586 lck_mtx_unlock(&np->n_openlock);
6587 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred);
6588 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6589 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6590 lck_mtx_lock(&np->n_openlock);
6591 np->n_openflags &= ~N_DELEG_MASK;
6592 lck_mtx_lock(&nmp->nm_lock);
6593 if (np->n_dlink.tqe_next != NFSNOLIST) {
6594 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6595 np->n_dlink.tqe_next = NFSNOLIST;
6596 }
6597 lck_mtx_unlock(&nmp->nm_lock);
6598 lck_mtx_unlock(&np->n_openlock);
6599 }
6600
6601out:
6602 /* make sure it's no longer on the return queue and clear the return flags */
6603 lck_mtx_lock(&nmp->nm_lock);
6604 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6605 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6606 np->n_dreturn.tqe_next = NFSNOLIST;
6607 }
6608 lck_mtx_unlock(&nmp->nm_lock);
6609 lck_mtx_lock(&np->n_openlock);
6610 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6611 lck_mtx_unlock(&np->n_openlock);
6612
6613 if (error) {
6614 NP(np, "nfs4_delegation_return, error %d", error);
6615 if (error == ETIMEDOUT) {
6616 nfs_need_reconnect(nmp);
6617 }
6618 if (nfs_mount_state_error_should_restart(error)) {
6619 /* make sure recovery happens */
6620 lck_mtx_lock(&nmp->nm_lock);
6621 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6622 lck_mtx_unlock(&nmp->nm_lock);
6623 }
6624 }
6625
6626 nfs_open_state_clear_busy(np);
6627 NFS_ZFREE(nfs_fhandle_zone, fh);
6628 return error;
6629}
6630
6631/*
6632 * RPC to return a delegation for a file handle
6633 */
6634int
6635nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6636{
6637 int error = 0, status, numops;
6638 uint64_t xid;
6639 struct nfsm_chain nmreq, nmrep;
6640 struct nfsreq_secinfo_args si;
6641
6642 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6643 nfsm_chain_null(&nmreq);
6644 nfsm_chain_null(&nmrep);
6645
6646 // PUTFH, DELEGRETURN
6647 numops = 2;
6648 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6649 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6650 numops--;
6651 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6652 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6653 numops--;
6654 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6655 nfsm_chain_add_stateid(error, &nmreq, sid);
6656 nfsm_chain_build_done(error, &nmreq);
6657 nfsm_assert(error, (numops == 0), EPROTO);
6658 nfsmout_if(error);
6659 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6660 nfsm_chain_skip_tag(error, &nmrep);
6661 nfsm_chain_get_32(error, &nmrep, numops);
6662 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6663 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6664nfsmout:
6665 nfsm_chain_cleanup(&nmreq);
6666 nfsm_chain_cleanup(&nmrep);
6667 return error;
6668}
6669#endif /* CONFIG_NFS4 */
6670
6671/*
6672 * NFS read call.
6673 * Just call nfs_bioread() to do the work.
6674 *
6675 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6676 * without first calling VNOP_OPEN, so we make sure the file is open here.
6677 */
6678int
6679nfs_vnop_read(
6680 struct vnop_read_args /* {
6681 * struct vnodeop_desc *a_desc;
6682 * vnode_t a_vp;
6683 * struct uio *a_uio;
6684 * int a_ioflag;
6685 * vfs_context_t a_context;
6686 * } */*ap)
6687{
6688 vnode_t vp = ap->a_vp;
6689 vfs_context_t ctx = ap->a_context;
6690 nfsnode_t np;
6691 struct nfsmount *nmp;
6692 struct nfs_open_owner *noop;
6693 struct nfs_open_file *nofp;
6694 int error;
6695
6696 if (vnode_vtype(ap->a_vp) != VREG) {
6697 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6698 }
6699
6700 np = VTONFS(vp);
6701 nmp = NFSTONMP(np);
6702 if (nfs_mount_gone(nmp)) {
6703 return ENXIO;
6704 }
6705 if (np->n_flag & NREVOKE) {
6706 return EIO;
6707 }
6708
6709 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6710 if (!noop) {
6711 return ENOMEM;
6712 }
6713restart:
6714 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6715 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6716 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6717 error = EIO;
6718 }
6719#if CONFIG_NFS4
6720 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6721 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6722 nofp = NULL;
6723 if (!error) {
6724 goto restart;
6725 }
6726 }
6727#endif
6728 if (error) {
6729 nfs_open_owner_rele(noop);
6730 return error;
6731 }
6732 /*
6733 * Since the read path is a hot path, if we already have
6734 * read access, let's go and try to do the read without
6735 * busying the mount and open file node for this open owner.
6736 *
6737 * N.B. This is inherently racy w.r.t. an execve using
6738 * an already open file, in that the read at the end of
6739 * this routine will be racing with a potential close.
6740 * The code below ultimately has the same problem. In practice
6741 * this does not seem to be an issue.
6742 */
6743 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6744 nfs_open_owner_rele(noop);
6745 goto do_read;
6746 }
6747 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6748 if (error) {
6749 nfs_open_owner_rele(noop);
6750 return error;
6751 }
6752 /*
6753 * If we don't have a file already open with the access we need (read), then
6754 * we need to open one. Otherwise we just co-opt an open. We might not already
6755 * have access because we're trying to read the first page of the
6756 * file for execve.
6757 */
6758 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6759 if (error) {
6760 nfs_mount_state_in_use_end(nmp, 0);
6761 nfs_open_owner_rele(noop);
6762 return error;
6763 }
6764 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6765 /* we don't have the file open, so open it for read access if we're not denied */
6766 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6767 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6768 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6769 }
6770 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6771 nfs_open_file_clear_busy(nofp);
6772 nfs_mount_state_in_use_end(nmp, 0);
6773 nfs_open_owner_rele(noop);
6774 return EPERM;
6775 }
6776 if (np->n_flag & NREVOKE) {
6777 error = EIO;
6778 nfs_open_file_clear_busy(nofp);
6779 nfs_mount_state_in_use_end(nmp, 0);
6780 nfs_open_owner_rele(noop);
6781 return error;
6782 }
6783 if (nmp->nm_vers < NFS_VER4) {
6784 /* NFS v2/v3 opens are always allowed - so just add it. */
6785 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6786 }
6787#if CONFIG_NFS4
6788 else {
6789 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6790 }
6791#endif
6792 if (!error) {
6793 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6794 }
6795 }
6796 if (nofp) {
6797 nfs_open_file_clear_busy(nofp);
6798 }
6799 if (nfs_mount_state_in_use_end(nmp, error)) {
6800 nofp = NULL;
6801 goto restart;
6802 }
6803 nfs_open_owner_rele(noop);
6804 if (error) {
6805 return error;
6806 }
6807do_read:
6808 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6809}
6810
6811#if CONFIG_NFS4
6812/*
6813 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6814 * Files are created using the NFSv4 OPEN RPC. So we must open the
6815 * file to create it and then close it.
6816 */
6817int
6818nfs4_vnop_create(
6819 struct vnop_create_args /* {
6820 * struct vnodeop_desc *a_desc;
6821 * vnode_t a_dvp;
6822 * vnode_t *a_vpp;
6823 * struct componentname *a_cnp;
6824 * struct vnode_attr *a_vap;
6825 * vfs_context_t a_context;
6826 * } */*ap)
6827{
6828 vfs_context_t ctx = ap->a_context;
6829 struct componentname *cnp = ap->a_cnp;
6830 struct vnode_attr *vap = ap->a_vap;
6831 vnode_t dvp = ap->a_dvp;
6832 vnode_t *vpp = ap->a_vpp;
6833 struct nfsmount *nmp;
6834 nfsnode_t np;
6835 int error = 0, busyerror = 0, accessMode, denyMode;
6836 struct nfs_open_owner *noop = NULL;
6837 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6838
6839 nmp = VTONMP(dvp);
6840 if (nfs_mount_gone(nmp)) {
6841 return ENXIO;
6842 }
6843
6844 if (vap) {
6845 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6846 }
6847
6848 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6849 if (!noop) {
6850 return ENOMEM;
6851 }
6852
6853restart:
6854 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6855 if (error) {
6856 nfs_open_owner_rele(noop);
6857 return error;
6858 }
6859
6860 /* grab a provisional, nodeless open file */
6861 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6862 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6863 printf("nfs_vnop_create: LOST\n");
6864 error = EIO;
6865 }
6866 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6867 /* This shouldn't happen given that this is a new, nodeless nofp */
6868 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6869 nfs_open_file_destroy(newnofp);
6870 newnofp = NULL;
6871 if (!error) {
6872 nfs_mount_state_in_use_end(nmp, 0);
6873 goto restart;
6874 }
6875 }
6876 if (!error) {
6877 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6878 }
6879 if (error) {
6880 if (newnofp) {
6881 nfs_open_file_destroy(newnofp);
6882 }
6883 newnofp = NULL;
6884 goto out;
6885 }
6886
6887 /*
6888 * We're just trying to create the file.
6889 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6890 */
6891 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6892 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6893
6894 /* Do the open/create */
6895 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6896 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6897 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6898 /*
6899 * Hmm... it looks like the request may have been retransmitted: we never
6900 * got the first response, which successfully created/opened the file, and
6901 * then the retry was denied because the mode the file was created with
6902 * doesn't allow write access.
6903 *
6904 * We'll try to work around this by temporarily updating the mode and
6905 * retrying the open.
6906 */
6907 struct vnode_attr vattr;
6908
6909 /* first make sure it's there */
6910 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6911 if (!error2 && np) {
6912 nfs_node_unlock(np);
6913 *vpp = NFSTOV(np);
6914 if (vnode_vtype(NFSTOV(np)) == VREG) {
6915 VATTR_INIT(&vattr);
6916 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6917 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6918 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6919 VATTR_INIT(&vattr);
6920 VATTR_SET(&vattr, va_mode, vap->va_mode);
6921 nfs4_setattr_rpc(np, &vattr, ctx);
6922 if (!error2) {
6923 error = 0;
6924 }
6925 }
6926 }
6927 if (error) {
6928 vnode_put(*vpp);
6929 *vpp = NULL;
6930 }
6931 }
6932 }
6933 if (!error && !*vpp) {
6934 printf("nfs4_open_rpc returned without a node?\n");
6935 /* Hmmm... with no node, we have no filehandle and can't close it */
6936 error = EIO;
6937 }
6938 if (error) {
6939 /* need to cleanup our temporary nofp */
6940 nfs_open_file_clear_busy(newnofp);
6941 nfs_open_file_destroy(newnofp);
6942 newnofp = NULL;
6943 goto out;
6944 }
6945 /* After we have a node, add our open file struct to the node */
6946 np = VTONFS(*vpp);
6947 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6948 nofp = newnofp;
6949 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6950 if (error) {
6951 /* This shouldn't happen, because we passed in a new nofp to use. */
6952 printf("nfs_open_file_find_internal failed! %d\n", error);
6953 goto out;
6954 } else if (nofp != newnofp) {
6955 /*
6956 * Hmm... an open file struct already exists.
6957 * Mark the existing one busy and merge our open into it.
6958 * Then destroy the one we created.
6959 * Note: there's no chance of an open conflict because the
6960 * open has already been granted.
6961 */
6962 busyerror = nfs_open_file_set_busy(nofp, NULL);
6963 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6964 nofp->nof_stateid = newnofp->nof_stateid;
6965 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6966 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6967 }
6968 nfs_open_file_clear_busy(newnofp);
6969 nfs_open_file_destroy(newnofp);
6970 }
6971 newnofp = NULL;
6972 /* mark the node as holding a create-initiated open */
6973 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6974 nofp->nof_creator = current_thread();
6975out:
6976 if (nofp && !busyerror) {
6977 nfs_open_file_clear_busy(nofp);
6978 }
6979 if (nfs_mount_state_in_use_end(nmp, error)) {
6980 nofp = newnofp = NULL;
6981 busyerror = 0;
6982 goto restart;
6983 }
6984 if (noop) {
6985 nfs_open_owner_rele(noop);
6986 }
6987 return error;
6988}
6989
6990/*
6991 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6992 */
6993int
6994nfs4_create_rpc(
6995 vfs_context_t ctx,
6996 nfsnode_t dnp,
6997 struct componentname *cnp,
6998 struct vnode_attr *vap,
6999 int type,
7000 char *link,
7001 nfsnode_t *npp)
7002{
7003 struct nfsmount *nmp;
7004 struct nfs_vattr *nvattr;
7005 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
7006 int nfsvers, namedattrs, numops;
7007 u_int64_t xid = 0, savedxid = 0;
7008 nfsnode_t np = NULL;
7009 vnode_t newvp = NULL;
7010 struct nfsm_chain nmreq, nmrep;
7011 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7012 const char *tag;
7013 nfs_specdata sd;
7014 fhandle_t *fh;
7015 struct nfsreq *req;
7016 struct nfs_dulookup *dul;
7017 struct nfsreq_secinfo_args si;
7018
7019 nmp = NFSTONMP(dnp);
7020 if (nfs_mount_gone(nmp)) {
7021 return ENXIO;
7022 }
7023 nfsvers = nmp->nm_vers;
7024 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7025 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7026 return EINVAL;
7027 }
7028
7029 sd.specdata1 = sd.specdata2 = 0;
7030
7031 switch (type) {
7032 case NFLNK:
7033 tag = "symlink";
7034 break;
7035 case NFBLK:
7036 case NFCHR:
7037 tag = "mknod";
7038 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
7039 return EINVAL;
7040 }
7041 sd.specdata1 = major(vap->va_rdev);
7042 sd.specdata2 = minor(vap->va_rdev);
7043 break;
7044 case NFSOCK:
7045 case NFFIFO:
7046 tag = "mknod";
7047 break;
7048 case NFDIR:
7049 tag = "mkdir";
7050 break;
7051 default:
7052 return EINVAL;
7053 }
7054
7055 fh = zalloc(nfs_fhandle_zone);
7056 req = zalloc(nfs_req_zone);
7057 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7058 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7059 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
7060
7061 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
7062 if (!namedattrs) {
7063 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7064 }
7065
7066 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
7067 NVATTR_INIT(nvattr);
7068 nfsm_chain_null(&nmreq);
7069 nfsm_chain_null(&nmrep);
7070
7071 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
7072 numops = 6;
7073 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
7074 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
7075 numops--;
7076 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7077 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
7078 numops--;
7079 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7080 numops--;
7081 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
7082 nfsm_chain_add_32(error, &nmreq, type);
7083 if (type == NFLNK) {
7084 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
7085 } else if ((type == NFBLK) || (type == NFCHR)) {
7086 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
7087 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
7088 }
7089 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7090 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7091 numops--;
7092 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7093 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7094 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7095 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7096 numops--;
7097 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7098 numops--;
7099 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7100 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7101 nfsm_chain_build_done(error, &nmreq);
7102 nfsm_assert(error, (numops == 0), EPROTO);
7103 nfsmout_if(error);
7104
7105 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
7106 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7107 if (!error) {
7108 if (!namedattrs) {
7109 nfs_dulookup_start(dul, dnp, ctx);
7110 }
7111 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7112 }
7113
7114 if ((lockerror = nfs_node_lock(dnp))) {
7115 error = lockerror;
7116 }
7117 nfsm_chain_skip_tag(error, &nmrep);
7118 nfsm_chain_get_32(error, &nmrep, numops);
7119 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7120 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7121 nfsmout_if(error);
7122 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7123 nfsm_chain_check_change_info(error, &nmrep, dnp);
7124 bmlen = NFS_ATTR_BITMAP_LEN;
7125 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
/* At this point, if we have no error, the object was created. */
/* If we didn't get attributes, we should look it up (via nfs_lookitup()). */
7128 create_error = error;
7129 nfsmout_if(error);
7130 nfs_vattr_set_supported(bitmap, vap);
7131 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7132 nfsmout_if(error);
7133 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
7134 nfsmout_if(error);
7135 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7136 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7137 error = EBADRPC;
7138 goto nfsmout;
7139 }
7140 /* directory attributes: if we don't get them, make sure to invalidate */
7141 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7142 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7143 savedxid = xid;
7144 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
7145 if (error) {
7146 NATTRINVALIDATE(dnp);
7147 }
7148
7149nfsmout:
7150 nfsm_chain_cleanup(&nmreq);
7151 nfsm_chain_cleanup(&nmrep);
7152
7153 if (!lockerror) {
7154 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7155 dnp->n_flag &= ~NNEGNCENTRIES;
7156 cache_purge_negatives(NFSTOV(dnp));
7157 }
7158 dnp->n_flag |= NMODIFIED;
7159 nfs_node_unlock(dnp);
7160 /* nfs_getattr() will check changed and purge caches */
7161 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7162 }
7163
7164 if (!error && fh->fh_len) {
7165 /* create the vnode with the filehandle and attributes */
7166 xid = savedxid;
7167 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
7168 if (!error) {
7169 newvp = NFSTOV(np);
7170 }
7171 }
7172
7173 if (!namedattrs) {
7174 nfs_dulookup_finish(dul, dnp, ctx);
7175 }
7176
7177 NVATTR_CLEANUP(nvattr);
7178 NFS_ZFREE(nfs_fhandle_zone, fh);
7179 NFS_ZFREE(nfs_req_zone, req);
7180 FREE(dul, M_TEMP);
7181 FREE(nvattr, M_TEMP);
7182
/*
* Kludge: Map EEXIST => 0, assuming it is the reply to a retry,
* if we can successfully look up the object.
*/
7187 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7188 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7189 if (!error) {
7190 newvp = NFSTOV(np);
7191 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
7192 error = EEXIST;
7193 }
7194 }
7195 }
7196 if (!busyerror) {
7197 nfs_node_clear_busy(dnp);
7198 }
7199 if (error) {
7200 if (newvp) {
7201 nfs_node_unlock(np);
7202 vnode_put(newvp);
7203 }
7204 } else {
7205 nfs_node_unlock(np);
7206 *npp = np;
7207 }
7208 return error;
7209}
7210
7211int
7212nfs4_vnop_mknod(
7213 struct vnop_mknod_args /* {
7214 * struct vnodeop_desc *a_desc;
7215 * vnode_t a_dvp;
7216 * vnode_t *a_vpp;
7217 * struct componentname *a_cnp;
7218 * struct vnode_attr *a_vap;
7219 * vfs_context_t a_context;
7220 * } */*ap)
7221{
7222 nfsnode_t np = NULL;
7223 struct nfsmount *nmp;
7224 int error;
7225
7226 nmp = VTONMP(ap->a_dvp);
7227 if (nfs_mount_gone(nmp)) {
7228 return ENXIO;
7229 }
7230
7231 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7232 return EINVAL;
7233 }
7234 switch (ap->a_vap->va_type) {
7235 case VBLK:
7236 case VCHR:
7237 case VFIFO:
7238 case VSOCK:
7239 break;
7240 default:
7241 return ENOTSUP;
7242 }
7243
7244 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7245 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7246 if (!error) {
7247 *ap->a_vpp = NFSTOV(np);
7248 }
7249 return error;
7250}
7251
7252int
7253nfs4_vnop_mkdir(
7254 struct vnop_mkdir_args /* {
7255 * struct vnodeop_desc *a_desc;
7256 * vnode_t a_dvp;
7257 * vnode_t *a_vpp;
7258 * struct componentname *a_cnp;
7259 * struct vnode_attr *a_vap;
7260 * vfs_context_t a_context;
7261 * } */*ap)
7262{
7263 nfsnode_t np = NULL;
7264 int error;
7265
7266 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7267 NFDIR, NULL, &np);
7268 if (!error) {
7269 *ap->a_vpp = NFSTOV(np);
7270 }
7271 return error;
7272}
7273
7274int
7275nfs4_vnop_symlink(
7276 struct vnop_symlink_args /* {
7277 * struct vnodeop_desc *a_desc;
7278 * vnode_t a_dvp;
7279 * vnode_t *a_vpp;
7280 * struct componentname *a_cnp;
7281 * struct vnode_attr *a_vap;
7282 * char *a_target;
7283 * vfs_context_t a_context;
7284 * } */*ap)
7285{
7286 nfsnode_t np = NULL;
7287 int error;
7288
7289 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7290 NFLNK, ap->a_target, &np);
7291 if (!error) {
7292 *ap->a_vpp = NFSTOV(np);
7293 }
7294 return error;
7295}
7296
7297int
7298nfs4_vnop_link(
7299 struct vnop_link_args /* {
7300 * struct vnodeop_desc *a_desc;
7301 * vnode_t a_vp;
7302 * vnode_t a_tdvp;
7303 * struct componentname *a_cnp;
7304 * vfs_context_t a_context;
7305 * } */*ap)
7306{
7307 vfs_context_t ctx = ap->a_context;
7308 vnode_t vp = ap->a_vp;
7309 vnode_t tdvp = ap->a_tdvp;
7310 struct componentname *cnp = ap->a_cnp;
7311 int error = 0, lockerror = ENOENT, status;
7312 struct nfsmount *nmp;
7313 nfsnode_t np = VTONFS(vp);
7314 nfsnode_t tdnp = VTONFS(tdvp);
7315 int nfsvers, numops;
7316 u_int64_t xid, savedxid;
7317 struct nfsm_chain nmreq, nmrep;
7318 struct nfsreq_secinfo_args si;
7319
7320 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7321 return EXDEV;
7322 }
7323
7324 nmp = VTONMP(vp);
7325 if (nfs_mount_gone(nmp)) {
7326 return ENXIO;
7327 }
7328 nfsvers = nmp->nm_vers;
7329 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7330 return EINVAL;
7331 }
7332 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7333 return EINVAL;
7334 }
7335
7336 /*
7337 * Push all writes to the server, so that the attribute cache
7338 * doesn't get "out of sync" with the server.
7339 * XXX There should be a better way!
7340 */
7341 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7342
7343 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7344 return error;
7345 }
7346
7347 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7348 nfsm_chain_null(&nmreq);
7349 nfsm_chain_null(&nmrep);
7350
7351 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7352 numops = 7;
7353 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
7354 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
7355 numops--;
7356 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7357 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7358 numops--;
7359 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7360 numops--;
7361 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7362 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7363 numops--;
7364 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7365 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7366 numops--;
7367 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7368 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7369 numops--;
7370 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7371 numops--;
7372 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7373 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7374 nfsm_chain_build_done(error, &nmreq);
7375 nfsm_assert(error, (numops == 0), EPROTO);
7376 nfsmout_if(error);
7377 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7378
7379 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7380 error = lockerror;
7381 goto nfsmout;
7382 }
7383 nfsm_chain_skip_tag(error, &nmrep);
7384 nfsm_chain_get_32(error, &nmrep, numops);
7385 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7386 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7387 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7388 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7389 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7390 /* directory attributes: if we don't get them, make sure to invalidate */
7391 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7392 savedxid = xid;
7393 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
7394 if (error) {
7395 NATTRINVALIDATE(tdnp);
7396 }
7397 /* link attributes: if we don't get them, make sure to invalidate */
7398 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7399 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7400 xid = savedxid;
7401 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
7402 if (error) {
7403 NATTRINVALIDATE(np);
7404 }
7405nfsmout:
7406 nfsm_chain_cleanup(&nmreq);
7407 nfsm_chain_cleanup(&nmrep);
7408 if (!lockerror) {
7409 tdnp->n_flag |= NMODIFIED;
7410 }
7411 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7412 if (error == EEXIST) {
7413 error = 0;
7414 }
7415 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7416 tdnp->n_flag &= ~NNEGNCENTRIES;
7417 cache_purge_negatives(tdvp);
7418 }
7419 if (!lockerror) {
7420 nfs_node_unlock2(tdnp, np);
7421 }
7422 nfs_node_clear_busy2(tdnp, np);
7423 return error;
7424}
7425
7426int
7427nfs4_vnop_rmdir(
7428 struct vnop_rmdir_args /* {
7429 * struct vnodeop_desc *a_desc;
7430 * vnode_t a_dvp;
7431 * vnode_t a_vp;
7432 * struct componentname *a_cnp;
7433 * vfs_context_t a_context;
7434 * } */*ap)
7435{
7436 vfs_context_t ctx = ap->a_context;
7437 vnode_t vp = ap->a_vp;
7438 vnode_t dvp = ap->a_dvp;
7439 struct componentname *cnp = ap->a_cnp;
7440 struct nfsmount *nmp;
7441 int error = 0, namedattrs;
7442 nfsnode_t np = VTONFS(vp);
7443 nfsnode_t dnp = VTONFS(dvp);
7444 struct nfs_dulookup *dul;
7445
7446 if (vnode_vtype(vp) != VDIR) {
7447 return EINVAL;
7448 }
7449
7450 nmp = NFSTONMP(dnp);
7451 if (nfs_mount_gone(nmp)) {
7452 return ENXIO;
7453 }
7454 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7455
7456 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7457 return error;
7458 }
7459
7460 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7461 if (!namedattrs) {
7462 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7463 nfs_dulookup_start(dul, dnp, ctx);
7464 }
7465
7466 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
7467 vfs_context_thread(ctx), vfs_context_ucred(ctx));
7468
7469 nfs_name_cache_purge(dnp, np, cnp, ctx);
7470 /* nfs_getattr() will check changed and purge caches */
7471 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7472 if (!namedattrs) {
7473 nfs_dulookup_finish(dul, dnp, ctx);
7474 }
7475 nfs_node_clear_busy2(dnp, np);
7476
/*
* Kludge: Map ENOENT => 0, assuming it is the reply to a retry.
*/
7480 if (error == ENOENT) {
7481 error = 0;
7482 }
7483 if (!error) {
7484 /*
7485 * remove nfsnode from hash now so we can't accidentally find it
7486 * again if another object gets created with the same filehandle
7487 * before this vnode gets reclaimed
7488 */
7489 lck_mtx_lock(&nfs_node_hash_mutex);
7490 if (np->n_hflag & NHHASHED) {
7491 LIST_REMOVE(np, n_hash);
7492 np->n_hflag &= ~NHHASHED;
7493 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7494 }
7495 lck_mtx_unlock(&nfs_node_hash_mutex);
7496 }
7497 FREE(dul, M_TEMP);
7498 return error;
7499}
7500
7501/*
7502 * NFSv4 Named Attributes
7503 *
7504 * Both the extended attributes interface and the named streams interface
7505 * are backed by NFSv4 named attributes. The implementations for both use
7506 * a common set of routines in an attempt to reduce code duplication, to
7507 * increase efficiency, to increase caching of both names and data, and to
7508 * confine the complexity.
7509 *
7510 * Each NFS node caches its named attribute directory's file handle.
7511 * The directory nodes for the named attribute directories are handled
* exactly like regular directories (with a couple of minor exceptions).
7513 * Named attribute nodes are also treated as much like regular files as
7514 * possible.
7515 *
7516 * Most of the heavy lifting is done by nfs4_named_attr_get().
7517 */
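
/*
* An illustrative sketch (not additional driver code, just a restatement of
* how the vnops below use these helpers): an extended attribute operation
* looks roughly like
*
*	error = nfs4_named_attr_get(np, &cn, accessMode, flags, ctx, &anp, &nofp);
*	... read/write NFSTOV(anp) much like a regular file ...
*	nfs_close(anp, nofp, accessMode, NFS_OPEN_SHARE_DENY_NONE, ctx);
*
* with lookups/creates of the attribute going through the node's named
* attribute directory (see nfs4_named_attr_dir_get() below).
*/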
7518
7519/*
7520 * Get the given node's attribute directory node.
7521 * If !fetch, then only return a cached node.
7522 * Otherwise, we will attempt to fetch the node from the server.
7523 * (Note: the node should be marked busy.)
7524 */
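/*
* For example (mirroring the callers below), fetching the directory node
* while np is marked busy:
*
*	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
*		return error;
*	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
*	nfs_node_clear_busy(np);
*	if (!adnp)
*		error = ENOENT;
*
* Passing fetch=0 instead returns only a cached attrdir node (or NULL).
*/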
7525nfsnode_t
7526nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
7527{
7528 nfsnode_t adnp = NULL;
7529 struct nfsmount *nmp;
7530 int error = 0, status, numops;
7531 struct nfsm_chain nmreq, nmrep;
7532 u_int64_t xid;
7533 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
7534 fhandle_t *fh;
7535 struct nfs_vattr *nvattr;
7536 struct componentname cn;
7537 struct nfsreq *req;
7538 struct nfsreq_secinfo_args si;
7539
7540 nmp = NFSTONMP(np);
7541 if (nfs_mount_gone(nmp)) {
7542 return NULL;
7543 }
7544 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7545 return NULL;
7546 }
7547
7548 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7549 fh = zalloc(nfs_fhandle_zone);
7550 req = zalloc(nfs_req_zone);
7551 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7552 NVATTR_INIT(nvattr);
7553 nfsm_chain_null(&nmreq);
7554 nfsm_chain_null(&nmrep);
7555
7556 bzero(&cn, sizeof(cn));
7557 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7558 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
7559 cn.cn_nameiop = LOOKUP;
7560
7561 if (np->n_attrdirfh) {
7562 // XXX can't set parent correctly (to np) yet
7563 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7564 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7565 if (adnp) {
7566 goto nfsmout;
7567 }
7568 }
7569 if (!fetch) {
7570 error = ENOENT;
7571 goto nfsmout;
7572 }
7573
7574 // PUTFH, OPENATTR, GETATTR
7575 numops = 3;
7576 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
7577 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
7578 numops--;
7579 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7580 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7581 numops--;
7582 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7583 nfsm_chain_add_32(error, &nmreq, 0);
7584 numops--;
7585 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7586 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7587 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7588 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7589 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7590 nfsm_chain_build_done(error, &nmreq);
7591 nfsm_assert(error, (numops == 0), EPROTO);
7592 nfsmout_if(error);
7593 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
7594 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7595 if (!error) {
7596 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7597 }
7598
7599 nfsm_chain_skip_tag(error, &nmrep);
7600 nfsm_chain_get_32(error, &nmrep, numops);
7601 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7602 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7603 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7604 nfsmout_if(error);
7605 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
7606 nfsmout_if(error);
7607 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
7608 error = ENOENT;
7609 goto nfsmout;
7610 }
7611 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
7612 /* (re)allocate attrdir fh buffer */
7613 if (np->n_attrdirfh) {
7614 FREE(np->n_attrdirfh, M_TEMP);
7615 }
7616 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
7617 }
7618 if (!np->n_attrdirfh) {
7619 error = ENOMEM;
7620 goto nfsmout;
7621 }
7622 /* cache the attrdir fh in the node */
7623 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
7624 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
7625 /* create node for attrdir */
7626 // XXX can't set parent correctly (to np) yet
7627 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
7628nfsmout:
7629 NVATTR_CLEANUP(nvattr);
7630 NFS_ZFREE(nfs_fhandle_zone, fh);
7631 NFS_ZFREE(nfs_req_zone, req);
7632 FREE(nvattr, M_TEMP);
7633 nfsm_chain_cleanup(&nmreq);
7634 nfsm_chain_cleanup(&nmrep);
7635
7636 if (adnp) {
7637 /* sanity check that this node is an attribute directory */
7638 if (adnp->n_vattr.nva_type != VDIR) {
7639 error = EINVAL;
7640 }
7641 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
7642 error = EINVAL;
7643 }
7644 nfs_node_unlock(adnp);
7645 if (error) {
7646 vnode_put(NFSTOV(adnp));
7647 }
7648 }
7649 return error ? NULL : adnp;
7650}
7651
7652/*
7653 * Get the given node's named attribute node for the name given.
7654 *
7655 * In an effort to increase the performance of named attribute access, we try
7656 * to reduce server requests by doing the following:
7657 *
7658 * - cache the node's named attribute directory file handle in the node
7659 * - maintain a directory vnode for the attribute directory
7660 * - use name cache entries (positive and negative) to speed up lookups
7661 * - optionally open the named attribute (with the given accessMode) in the same RPC
7662 * - combine attribute directory retrieval with the lookup/open RPC
7663 * - optionally prefetch the named attribute's first block of data in the same RPC
7664 *
7665 * Also, in an attempt to reduce the number of copies/variations of this code,
7666 * parts of the RPC building/processing code are conditionalized on what is
7667 * needed for any particular request (openattr, lookup vs. open, read).
7668 *
7669 * Note that because we may not have the attribute directory node when we start
7670 * the lookup/open, we lock both the node and the attribute directory node.
7671 */
7672
7673#define NFS_GET_NAMED_ATTR_CREATE 0x1
7674#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7675#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7676#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7677
7678int
7679nfs4_named_attr_get(
7680 nfsnode_t np,
7681 struct componentname *cnp,
7682 uint32_t accessMode,
7683 int flags,
7684 vfs_context_t ctx,
7685 nfsnode_t *anpp,
7686 struct nfs_open_file **nofpp)
7687{
7688 struct nfsmount *nmp;
7689 int error = 0, open_error = EIO;
7690 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7691 int create, guarded, prefetch, truncate, noopbusy = 0;
7692 int open, status, numops, hadattrdir, negnamecache;
7693 struct nfs_vattr *nvattr;
7694 struct vnode_attr vattr;
7695 nfsnode_t adnp = NULL, anp = NULL;
7696 vnode_t avp = NULL;
7697 u_int64_t xid = 0, savedxid = 0;
7698 struct nfsm_chain nmreq, nmrep;
7699 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7700 uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen;
7701 nfs_stateid stateid, dstateid;
7702 fhandle_t *fh;
7703 struct nfs_open_owner *noop = NULL;
7704 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7705 struct vnop_access_args naa;
7706 thread_t thd;
7707 kauth_cred_t cred;
7708 struct timeval now;
7709 char sbuf[64], *s;
7710 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7711 struct kauth_ace ace;
7712 struct nfsreq *req;
7713 struct nfsreq_secinfo_args si;
7714
7715 *anpp = NULL;
7716 rflags = delegation = recall = eof = rlen = retlen = 0;
7717 ace.ace_flags = 0;
7718 s = sbuf;
7719 slen = sizeof(sbuf);
7720
7721 nmp = NFSTONMP(np);
7722 if (nfs_mount_gone(nmp)) {
7723 return ENXIO;
7724 }
7725 fh = zalloc(nfs_fhandle_zone);
7726 req = zalloc(nfs_req_zone);
7727 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7728 NVATTR_INIT(nvattr);
7729 fh->fh_len = 0;
7730 bzero(&dstateid, sizeof(dstateid));
7731 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7732 thd = vfs_context_thread(ctx);
7733 cred = vfs_context_ucred(ctx);
7734 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7735 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7736 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7737 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7738
7739 if (!create) {
7740 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
7741 if (error) {
7742 goto out_free;
7743 }
7744 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7745 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7746 error = ENOATTR;
7747 goto out_free;
7748 }
7749 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7750 /* shouldn't happen... but just be safe */
7751 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7752 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7753 }
7754 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7755 if (open) {
7756 /*
7757 * We're trying to open the file.
7758 * We'll create/open it with the given access mode,
7759 * and set NFS_OPEN_FILE_CREATE.
7760 */
7761 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7762 if (prefetch && guarded) {
7763 prefetch = 0; /* no sense prefetching data that can't be there */
7764 }
7765 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7766 if (!noop) {
7767 error = ENOMEM;
7768 goto out_free;
7769 }
7770 }
7771
7772 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7773 goto out_free;
7774 }
7775
7776 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7777 hadattrdir = (adnp != NULL);
7778 if (prefetch) {
7779 microuptime(&now);
7780 /* use the special state ID because we don't have a real one to send */
7781 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7782 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7783 }
7784 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7785 nfsm_chain_null(&nmreq);
7786 nfsm_chain_null(&nmrep);
7787
7788 if (hadattrdir) {
7789 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7790 goto nfsmout;
7791 }
7792 /* nfs_getattr() will check changed and purge caches */
7793 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7794 nfsmout_if(error);
7795 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7796 switch (error) {
7797 case ENOENT:
7798 /* negative cache entry */
7799 goto nfsmout;
7800 case 0:
7801 /* cache miss */
7802 /* try dir buf cache lookup */
7803 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL);
7804 if (!error && anp) {
7805 /* dir buf cache hit */
7806 *anpp = anp;
7807 error = -1;
7808 }
7809 if (error != -1) { /* cache miss */
7810 break;
7811 }
7812 OS_FALLTHROUGH;
7813 case -1:
7814 /* cache hit, not really an error */
7815 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7816 if (!anp && avp) {
7817 *anpp = anp = VTONFS(avp);
7818 }
7819
7820 nfs_node_clear_busy(adnp);
7821 adbusyerror = ENOENT;
7822
7823 /* check for directory access */
7824 naa.a_desc = &vnop_access_desc;
7825 naa.a_vp = NFSTOV(adnp);
7826 naa.a_action = KAUTH_VNODE_SEARCH;
7827 naa.a_context = ctx;
7828
7829 /* compute actual success/failure based on accessibility */
7830 error = nfs_vnop_access(&naa);
7831 OS_FALLTHROUGH;
7832 default:
7833 /* we either found it, or hit an error */
7834 if (!error && guarded) {
7835 /* found cached entry but told not to use it */
7836 error = EEXIST;
7837 vnode_put(NFSTOV(anp));
7838 *anpp = anp = NULL;
7839 }
7840 /* we're done if error or we don't need to open */
7841 if (error || !open) {
7842 goto nfsmout;
7843 }
7844 /* no error and we need to open... */
7845 }
7846 }
7847
7848 if (open) {
7849restart:
7850 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7851 if (error) {
7852 nfs_open_owner_rele(noop);
7853 noop = NULL;
7854 goto nfsmout;
7855 }
7856 inuse = 1;
7857
7858 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7859 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7860 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7861 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7862 error = EIO;
7863 }
7864 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7865 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7866 nfs_open_file_destroy(newnofp);
7867 newnofp = NULL;
7868 if (!error) {
7869 nfs_mount_state_in_use_end(nmp, 0);
7870 inuse = 0;
7871 goto restart;
7872 }
7873 }
7874 if (!error) {
7875 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7876 }
7877 if (error) {
7878 if (newnofp) {
7879 nfs_open_file_destroy(newnofp);
7880 }
7881 newnofp = NULL;
7882 goto nfsmout;
7883 }
7884 if (anp) {
7885 /*
7886 * We already have the node. So we just need to open
7887 * it - which we may be able to do with a delegation.
7888 */
7889 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7890 if (!error) {
7891 /* open succeeded, so our open file is no longer temporary */
7892 nofp = newnofp;
7893 nofpbusyerror = 0;
7894 newnofp = NULL;
7895 if (nofpp) {
7896 *nofpp = nofp;
7897 }
7898 }
7899 goto nfsmout;
7900 }
7901 }
7902
7903 /*
7904 * We either don't have the attrdir or we didn't find the attribute
7905 * in the name cache, so we need to talk to the server.
7906 *
7907 * If we don't have the attrdir, we'll need to ask the server for that too.
7908 * If the caller is requesting that the attribute be created, we need to
7909 * make sure the attrdir is created.
7910 * The caller may also request that the first block of an existing attribute
7911 * be retrieved at the same time.
7912 */
7913
7914 if (open) {
7915 /* need to mark the open owner busy during the RPC */
7916 if ((error = nfs_open_owner_set_busy(noop, thd))) {
7917 goto nfsmout;
7918 }
7919 noopbusy = 1;
7920 }
7921
7922 /*
7923 * We'd like to get updated post-open/lookup attributes for the
7924 * directory and we may also want to prefetch some data via READ.
7925 * We'd like the READ results to be last so that we can leave the
7926 * data in the mbufs until the end.
7927 *
7928 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7929 */
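/*
* e.g. a create/open with no cached attrdir and prefetch enabled sends the
* maximal 12-op compound (LOOKUP takes the place of OPEN when not opening):
*
*	PUTFH(np), OPENATTR(create), GETATTR, OPEN, GETATTR(FH), SAVEFH,
*	PUTFH(np), OPENATTR, GETATTR, RESTOREFH, NVERIFY(size == 0), READ
*/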
7930 numops = 5;
7931 if (!hadattrdir) {
7932 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7933 }
7934 if (prefetch) {
7935 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7936 }
7937 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7938 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7939 if (hadattrdir) {
7940 numops--;
7941 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7942 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7943 } else {
7944 numops--;
7945 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7946 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7947 numops--;
7948 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7949 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7950 numops--;
7951 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7952 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7953 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7954 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7955 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7956 }
7957 if (open) {
7958 numops--;
7959 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7960 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7961 nfsm_chain_add_32(error, &nmreq, accessMode);
7962 nfsm_chain_add_32(error, &nmreq, denyMode);
7963 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7964 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7965 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7966 nfsm_chain_add_32(error, &nmreq, create);
7967 if (create) {
7968 nfsm_chain_add_32(error, &nmreq, guarded);
7969 VATTR_INIT(&vattr);
7970 if (truncate) {
7971 VATTR_SET(&vattr, va_data_size, 0);
7972 }
7973 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7974 }
7975 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7976 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7977 } else {
7978 numops--;
7979 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7980 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7981 }
7982 numops--;
7983 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7984 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7985 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7986 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7987 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7988 if (prefetch) {
7989 numops--;
7990 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7991 }
7992 if (hadattrdir) {
7993 numops--;
7994 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7995 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7996 } else {
7997 numops--;
7998 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7999 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
8000 numops--;
8001 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
8002 nfsm_chain_add_32(error, &nmreq, 0);
8003 }
8004 numops--;
8005 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
8006 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
8007 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
8008 if (prefetch) {
8009 numops--;
8010 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
8011 numops--;
8012 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
8013 VATTR_INIT(&vattr);
8014 VATTR_SET(&vattr, va_data_size, 0);
8015 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8016 numops--;
8017 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
8018 nfsm_chain_add_stateid(error, &nmreq, &stateid);
8019 nfsm_chain_add_64(error, &nmreq, 0);
8020 nfsm_chain_add_32(error, &nmreq, rlen);
8021 }
8022 nfsm_chain_build_done(error, &nmreq);
8023 nfsm_assert(error, (numops == 0), EPROTO);
8024 nfsmout_if(error);
8025 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
8026 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
8027 if (!error) {
8028 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
8029 }
8030
8031 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
8032 error = adlockerror;
8033 }
8034 savedxid = xid;
8035 nfsm_chain_skip_tag(error, &nmrep);
8036 nfsm_chain_get_32(error, &nmrep, numops);
8037 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8038 if (!hadattrdir) {
8039 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8040 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8041 nfsmout_if(error);
8042 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8043 nfsmout_if(error);
8044 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) {
8045 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
8046 /* (re)allocate attrdir fh buffer */
8047 if (np->n_attrdirfh) {
8048 FREE(np->n_attrdirfh, M_TEMP);
8049 }
8050 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
8051 }
8052 if (np->n_attrdirfh) {
8053 /* remember the attrdir fh in the node */
8054 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8055 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
8056 /* create busied node for attrdir */
8057 struct componentname cn;
8058 bzero(&cn, sizeof(cn));
8059 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
8060 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
8061 cn.cn_nameiop = LOOKUP;
8062 // XXX can't set parent correctly (to np) yet
8063 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
8064 if (!error) {
8065 adlockerror = 0;
8066 /* set the node busy */
8067 SET(adnp->n_flag, NBUSY);
8068 adbusyerror = 0;
8069 }
8070 /* if no adnp, oh well... */
8071 error = 0;
8072 }
8073 }
8074 NVATTR_CLEANUP(nvattr);
8075 fh->fh_len = 0;
8076 }
8077 if (open) {
8078 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
8079 nfs_owner_seqid_increment(noop, NULL, error);
8080 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
8081 nfsm_chain_check_change_info(error, &nmrep, adnp);
8082 nfsm_chain_get_32(error, &nmrep, rflags);
8083 bmlen = NFS_ATTR_BITMAP_LEN;
8084 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
8085 nfsm_chain_get_32(error, &nmrep, delegation);
8086 if (!error) {
8087 switch (delegation) {
8088 case NFS_OPEN_DELEGATE_NONE:
8089 break;
8090 case NFS_OPEN_DELEGATE_READ:
8091 case NFS_OPEN_DELEGATE_WRITE:
8092 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
8093 nfsm_chain_get_32(error, &nmrep, recall);
if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip (ignore) the space limit: limitby + 2 words
8095 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
8096 }
8097 /* if we have any trouble accepting the ACE, just invalidate it */
8098 ace_type = ace_flags = ace_mask = len = 0;
8099 nfsm_chain_get_32(error, &nmrep, ace_type);
8100 nfsm_chain_get_32(error, &nmrep, ace_flags);
8101 nfsm_chain_get_32(error, &nmrep, ace_mask);
8102 nfsm_chain_get_32(error, &nmrep, len);
8103 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
8104 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
8105 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
8106 if (!error && (len >= slen)) {
8107 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
8108 if (s) {
8109 slen = len + 1;
8110 } else {
8111 ace.ace_flags = 0;
8112 }
8113 }
8114 if (s) {
8115 nfsm_chain_get_opaque(error, &nmrep, len, s);
8116 } else {
8117 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
8118 }
8119 if (!error && s) {
8120 s[len] = '\0';
8121 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
8122 ace.ace_flags = 0;
8123 }
8124 }
8125 if (error || !s) {
8126 ace.ace_flags = 0;
8127 }
8128 if (s && (s != sbuf)) {
8129 FREE(s, M_TEMP);
8130 }
8131 break;
8132 default:
8133 error = EBADRPC;
8134 break;
8135 }
8136 }
8137 /* At this point if we have no error, the object was created/opened. */
8138 open_error = error;
8139 } else {
8140 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8141 }
8142 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8143 nfsmout_if(error);
8144 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8145 nfsmout_if(error);
8146 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
8147 error = EIO;
8148 goto nfsmout;
8149 }
8150 if (prefetch) {
8151 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
8152 }
8153 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8154 if (!hadattrdir) {
8155 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8156 }
8157 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8158 nfsmout_if(error);
8159 xid = savedxid;
8160 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8161 nfsmout_if(error);
8162
8163 if (open) {
8164 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8165 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8166 }
8167 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8168 if (adnp) {
8169 nfs_node_unlock(adnp);
8170 adlockerror = ENOENT;
8171 }
8172 NVATTR_CLEANUP(nvattr);
8173 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid);
8174 nfsmout_if(error);
8175 savedxid = xid;
if (adnp && ((adlockerror = nfs_node_lock(adnp)))) { /* adnp may be NULL if we couldn't create the attrdir node above */
8177 error = adlockerror;
8178 }
8179 }
8180 }
8181
8182nfsmout:
8183 if (open && adnp && !adlockerror) {
8184 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8185 adnp->n_flag &= ~NNEGNCENTRIES;
8186 cache_purge_negatives(NFSTOV(adnp));
8187 }
8188 adnp->n_flag |= NMODIFIED;
8189 nfs_node_unlock(adnp);
8190 adlockerror = ENOENT;
8191 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8192 }
8193 if (adnp && !adlockerror && (error == ENOENT) &&
8194 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8195 /* add a negative entry in the name cache */
8196 cache_enter(NFSTOV(adnp), NULL, cnp);
8197 adnp->n_flag |= NNEGNCENTRIES;
8198 }
8199 if (adnp && !adlockerror) {
8200 nfs_node_unlock(adnp);
8201 adlockerror = ENOENT;
8202 }
8203 if (!error && !anp && fh->fh_len) {
8204 /* create the vnode with the filehandle and attributes */
8205 xid = savedxid;
8206 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp);
8207 if (!error) {
8208 *anpp = anp;
8209 nfs_node_unlock(anp);
8210 }
8211 if (!error && open) {
8212 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8213 /* After we have a node, add our open file struct to the node */
8214 nofp = newnofp;
8215 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8216 if (error) {
8217 /* This shouldn't happen, because we passed in a new nofp to use. */
8218 printf("nfs_open_file_find_internal failed! %d\n", error);
8219 nofp = NULL;
8220 } else if (nofp != newnofp) {
8221 /*
8222 * Hmm... an open file struct already exists.
8223 * Mark the existing one busy and merge our open into it.
8224 * Then destroy the one we created.
* Note: there's no chance of an open conflict because the
8226 * open has already been granted.
8227 */
8228 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8229 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8230 nofp->nof_stateid = newnofp->nof_stateid;
8231 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8232 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8233 }
8234 nfs_open_file_clear_busy(newnofp);
8235 nfs_open_file_destroy(newnofp);
8236 newnofp = NULL;
8237 }
8238 if (!error) {
8239 newnofp = NULL;
8240 nofpbusyerror = 0;
8241 /* mark the node as holding a create-initiated open */
8242 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8243 nofp->nof_creator = current_thread();
8244 if (nofpp) {
8245 *nofpp = nofp;
8246 }
8247 }
8248 }
8249 }
8250 NVATTR_CLEANUP(nvattr);
8251 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8252 if (!error && anp && !recall) {
8253 /* stuff the delegation state in the node */
8254 lck_mtx_lock(&anp->n_openlock);
8255 anp->n_openflags &= ~N_DELEG_MASK;
8256 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8257 anp->n_dstateid = dstateid;
8258 anp->n_dace = ace;
8259 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8260 lck_mtx_lock(&nmp->nm_lock);
8261 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8262 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8263 }
8264 lck_mtx_unlock(&nmp->nm_lock);
8265 }
8266 lck_mtx_unlock(&anp->n_openlock);
8267 } else {
8268 /* give the delegation back */
8269 if (anp) {
8270 if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) {
8271 /* update delegation state and return it */
8272 lck_mtx_lock(&anp->n_openlock);
8273 anp->n_openflags &= ~N_DELEG_MASK;
8274 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8275 anp->n_dstateid = dstateid;
8276 anp->n_dace = ace;
8277 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8278 lck_mtx_lock(&nmp->nm_lock);
8279 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8280 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8281 }
8282 lck_mtx_unlock(&nmp->nm_lock);
8283 }
8284 lck_mtx_unlock(&anp->n_openlock);
8285 /* don't need to send a separate delegreturn for fh */
8286 fh->fh_len = 0;
8287 }
8288 /* return anp's current delegation */
8289 nfs4_delegation_return(anp, 0, thd, cred);
8290 }
8291 if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */
8292 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
8293 }
8294 }
8295 }
8296 if (open) {
8297 if (newnofp) {
/* need to clean up our temporary nofp */
8299 nfs_open_file_clear_busy(newnofp);
8300 nfs_open_file_destroy(newnofp);
8301 newnofp = NULL;
8302 } else if (nofp && !nofpbusyerror) {
8303 nfs_open_file_clear_busy(nofp);
8304 nofpbusyerror = ENOENT;
8305 }
8306 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8307 inuse = 0;
8308 nofp = newnofp = NULL;
8309 rflags = delegation = recall = eof = rlen = retlen = 0;
8310 ace.ace_flags = 0;
8311 s = sbuf;
8312 slen = sizeof(sbuf);
8313 nfsm_chain_cleanup(&nmreq);
8314 nfsm_chain_cleanup(&nmrep);
8315 if (anp) {
8316 vnode_put(NFSTOV(anp));
8317 *anpp = anp = NULL;
8318 }
8319 hadattrdir = (adnp != NULL);
8320 if (noopbusy) {
8321 nfs_open_owner_clear_busy(noop);
8322 noopbusy = 0;
8323 }
8324 goto restart;
8325 }
8326 inuse = 0;
8327 if (noop) {
8328 if (noopbusy) {
8329 nfs_open_owner_clear_busy(noop);
8330 noopbusy = 0;
8331 }
8332 nfs_open_owner_rele(noop);
8333 }
8334 }
8335 if (!error && prefetch && nmrep.nmc_mhead) {
8336 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8337 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8338 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8339 nfsm_chain_get_32(error, &nmrep, eof);
8340 nfsm_chain_get_32(error, &nmrep, retlen);
8341 if (!error && anp) {
/*
* There is one problem with doing the prefetch.
* Because we don't have the node before we start the RPC, we
* can't have the buffer busy while the READ is performed.
* So there is a chance that other I/O occurred on the same
* range of data while we were performing this RPC. If that
* happens, the data in the READ response may no longer be
* up to date.
* Once we have the node and the buffer, we need to make sure
* we aren't putting stale data in the buffer.
* So, we check whether the range read is dirty and whether any
* I/O may have occurred on it while we were performing our RPC.
*/
8356 struct nfsbuf *bp = NULL;
8357 int lastpg;
8358 nfsbufpgs pagemask, pagemaskand;
8359
8360 retlen = MIN(retlen, rlen);
8361
8362 /* check if node needs size update or invalidation */
8363 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8364 nfs_data_update_size(anp, 0);
8365 }
8366 if (!(error = nfs_node_lock(anp))) {
8367 if (anp->n_flag & NNEEDINVALIDATE) {
8368 anp->n_flag &= ~NNEEDINVALIDATE;
8369 nfs_node_unlock(anp);
8370 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
if (!error) { /* let's play it safe and just drop the data */
8372 error = EIO;
8373 }
8374 } else {
8375 nfs_node_unlock(anp);
8376 }
8377 }
8378
8379 /* calculate page mask for the range of data read */
8380 lastpg = (retlen - 1) / PAGE_SIZE;
8381 nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1);
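/*
* e.g. with PAGE_SIZE 4096, retlen = 10000 gives lastpg = 2, so the
* mask covers pages 0-2 (the pages this READ's data touches).
*/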
8382
8383 if (!error) {
8384 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8385 }
/* don't save the data if dirty or potential I/O conflict */
/* (bp may be NULL here if nfs_buf_get() failed or was skipped due to an earlier error) */
if (bp) {
	nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand);
}
if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) &&
8389 timevalcmp(&anp->n_lastio, &now, <)) {
8390 OSAddAtomic64(1, &nfsstats.read_bios);
8391 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8392 SET(bp->nb_flags, NB_READ);
8393 NFS_BUF_MAP(bp);
8394 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8395 if (error) {
8396 bp->nb_error = error;
8397 SET(bp->nb_flags, NB_ERROR);
8398 } else {
8399 bp->nb_offio = 0;
8400 bp->nb_endio = rlen;
8401 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8402 bp->nb_endio = retlen;
8403 }
8404 if (eof || (retlen == 0)) {
8405 /* zero out the remaining data (up to EOF) */
8406 off_t rpcrem, eofrem, rem;
8407 rpcrem = (rlen - retlen);
8408 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8409 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8410 if (rem > 0) {
8411 bzero(bp->nb_data + retlen, rem);
8412 }
8413 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8414 /* ugh... short read ... just invalidate for now... */
8415 SET(bp->nb_flags, NB_INVAL);
8416 }
8417 }
8418 nfs_buf_read_finish(bp);
8419 microuptime(&anp->n_lastio);
8420 }
8421 if (bp) {
8422 nfs_buf_release(bp, 1);
8423 }
8424 }
8425 error = 0; /* ignore any transient error in processing the prefetch */
8426 }
8427 if (adnp && !adbusyerror) {
8428 nfs_node_clear_busy(adnp);
8429 adbusyerror = ENOENT;
8430 }
8431 if (!busyerror) {
8432 nfs_node_clear_busy(np);
8433 busyerror = ENOENT;
8434 }
8435 if (adnp) {
8436 vnode_put(NFSTOV(adnp));
8437 }
8438 if (inuse) {
8439 nfs_mount_state_in_use_end(nmp, error);
8440 }
8441 if (error && *anpp) {
8442 vnode_put(NFSTOV(*anpp));
8443 *anpp = NULL;
8444 }
8445 nfsm_chain_cleanup(&nmreq);
8446 nfsm_chain_cleanup(&nmrep);
8447out_free:
8448 NFS_ZFREE(nfs_fhandle_zone, fh);
8449 NFS_ZFREE(nfs_req_zone, req);
8450 FREE(nvattr, M_TEMP);
8451 return error;
8452}
8453
8454/*
8455 * Remove a named attribute.
8456 */
8457int
8458nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8459{
8460 nfsnode_t adnp = NULL;
8461 struct nfsmount *nmp;
8462 struct componentname cn;
8463 struct vnop_remove_args vra;
8464 int error, putanp = 0;
8465
8466 nmp = NFSTONMP(np);
8467 if (nfs_mount_gone(nmp)) {
8468 return ENXIO;
8469 }
8470
8471 bzero(&cn, sizeof(cn));
8472 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8473 cn.cn_namelen = NFS_STRLEN_INT(name);
8474 cn.cn_nameiop = DELETE;
8475 cn.cn_flags = 0;
8476
8477 if (!anp) {
8478 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8479 0, ctx, &anp, NULL);
8480 if ((!error && !anp) || (error == ENOATTR)) {
8481 error = ENOENT;
8482 }
8483 if (error) {
8484 if (anp) {
8485 vnode_put(NFSTOV(anp));
8486 anp = NULL;
8487 }
8488 goto out;
8489 }
8490 putanp = 1;
8491 }
8492
8493 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8494 goto out;
8495 }
8496 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8497 nfs_node_clear_busy(np);
8498 if (!adnp) {
8499 error = ENOENT;
8500 goto out;
8501 }
8502
8503 vra.a_desc = &vnop_remove_desc;
8504 vra.a_dvp = NFSTOV(adnp);
8505 vra.a_vp = NFSTOV(anp);
8506 vra.a_cnp = &cn;
8507 vra.a_flags = 0;
8508 vra.a_context = ctx;
8509 error = nfs_vnop_remove(&vra);
8510out:
8511 if (adnp) {
8512 vnode_put(NFSTOV(adnp));
8513 }
8514 if (putanp) {
8515 vnode_put(NFSTOV(anp));
8516 }
8517 return error;
8518}
8519
8520int
8521nfs4_vnop_getxattr(
8522 struct vnop_getxattr_args /* {
8523 * struct vnodeop_desc *a_desc;
8524 * vnode_t a_vp;
8525 * const char * a_name;
8526 * uio_t a_uio;
8527 * size_t *a_size;
8528 * int a_options;
8529 * vfs_context_t a_context;
8530 * } */*ap)
8531{
8532 vfs_context_t ctx = ap->a_context;
8533 struct nfsmount *nmp;
8534 struct nfs_vattr *nvattr;
8535 struct componentname cn;
8536 nfsnode_t anp;
8537 int error = 0, isrsrcfork;
8538
8539 nmp = VTONMP(ap->a_vp);
8540 if (nfs_mount_gone(nmp)) {
8541 return ENXIO;
8542 }
8543
8544 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8545 return ENOTSUP;
8546 }
8547
8548 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8549 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
8550 if (error) {
8551 goto out;
8552 }
8553 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8554 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8555 error = ENOATTR;
8556 goto out;
8557 }
8558
8559 bzero(&cn, sizeof(cn));
8560 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8561 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
8562 cn.cn_nameiop = LOOKUP;
8563 cn.cn_flags = MAKEENTRY;
8564
8565 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8566 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
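/* (the bcmp() length includes the terminating NUL, so this is an exact name match) */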
8567
8568 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8569 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8570 if ((!error && !anp) || (error == ENOENT)) {
8571 error = ENOATTR;
8572 }
8573 if (!error) {
8574 if (ap->a_uio) {
8575 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8576 } else {
8577 *ap->a_size = anp->n_size;
8578 }
8579 }
8580 if (anp) {
8581 vnode_put(NFSTOV(anp));
8582 }
8583out:
8584 FREE(nvattr, M_TEMP);
8585 return error;
8586}
8587
8588int
8589nfs4_vnop_setxattr(
8590 struct vnop_setxattr_args /* {
8591 * struct vnodeop_desc *a_desc;
8592 * vnode_t a_vp;
8593 * const char * a_name;
8594 * uio_t a_uio;
8595 * int a_options;
8596 * vfs_context_t a_context;
8597 * } */*ap)
8598{
8599 vfs_context_t ctx = ap->a_context;
8600 int options = ap->a_options;
8601 uio_t uio = ap->a_uio;
8602 const char *name = ap->a_name;
8603 struct nfsmount *nmp;
8604 struct componentname cn;
8605 nfsnode_t anp = NULL;
8606 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8607#define FINDERINFOSIZE 32
8608 uint8_t finfo[FINDERINFOSIZE];
8609 uint32_t *finfop;
8610 struct nfs_open_file *nofp = NULL;
8611 char uio_buf[UIO_SIZEOF(1)];
8612 uio_t auio;
8613 struct vnop_write_args vwa;
8614
8615 nmp = VTONMP(ap->a_vp);
8616 if (nfs_mount_gone(nmp)) {
8617 return ENXIO;
8618 }
8619
8620 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8621 return ENOTSUP;
8622 }
8623
8624 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8625 return EINVAL;
8626 }
8627
8628 /* XXX limitation based on need to back up uio on short write */
8629 if (uio_iovcnt(uio) > 1) {
8630 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
8631 return EINVAL;
8632 }
8633
8634 bzero(&cn, sizeof(cn));
8635 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8636 cn.cn_namelen = NFS_STRLEN_INT(name);
8637 cn.cn_nameiop = CREATE;
8638 cn.cn_flags = MAKEENTRY;
8639
	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	if (!isrsrcfork) {
		uio_setoffset(uio, 0);
	}
	if (isfinderinfo) {
		if (uio_resid(uio) != sizeof(finfo)) {
			return ERANGE;
		}
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error) {
			return error;
		}
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
			if (finfop[i]) {
				empty = 0;
				break;
			}
		}
		if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT) {
				error = 0;
			}
			return error;
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	}
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	}
	if (!isrsrcfork) {
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
	}

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	if (!error) {
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}

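/*
 * Remove a named attribute by removing its entry from the
 * file's named attribute directory.
 */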
int
nfs4_vnop_removexattr(
	struct vnop_removexattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  const char *a_name;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */ *ap)
{
	struct nfsmount *nmp = VTONMP(ap->a_vp);
	int error;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}

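/*
 * List a file's named attributes by reading the entries of its
 * named attribute directory, skipping any protected names.
 */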
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  uio_t a_uio;
	 *  size_t *a_size;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr *nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
	if (error) {
		goto out_free;
	}
	if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		error = 0;
		goto out_free;
	}

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		goto out_free;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_UNCACHED))) {
		goto out;
	}

	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

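	/*
	 * Walk the named attribute directory one buffer at a time,
	 * copying out (or just counting) each entry's name.
	 */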
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			if (error == NFSERR_DIRBUFDROPPED) {
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
out_free:
	FREE(nvattr, M_TEMP);
	return error;
}

#if NAMEDSTREAMS
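/*
 * Get the vnode of an existing named stream, looking it up
 * without requesting any open access (NFS_OPEN_SHARE_ACCESS_NONE).
 */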
int
nfs4_vnop_getnamedstream(
	struct vnop_getnamedstream_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  vnode_t *a_svpp;
	 *  const char *a_name;
	 *  enum nsoperation a_operation;
	 *  int a_flags;
	 *  vfs_context_t a_context;
	 *  } */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_vattr *nvattr;
	struct componentname cn;
	nfsnode_t anp;
	int error = 0;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
	if (error) {
		goto out;
	}
	if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		error = ENOATTR;
		goto out;
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    0, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
		error = ENOATTR;
	}
	if (!error && anp) {
		*ap->a_svpp = NFSTOV(anp);
	} else if (anp) {
		vnode_put(NFSTOV(anp));
	}
out:
	FREE(nvattr, M_TEMP);
	return error;
}

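/*
 * Create a named stream, opening it for both read and write
 * access (NFS_OPEN_SHARE_ACCESS_BOTH).
 */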
int
nfs4_vnop_makenamedstream(
	struct vnop_makenamedstream_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t *a_svpp;
	 *  vnode_t a_vp;
	 *  const char *a_name;
	 *  int a_flags;
	 *  vfs_context_t a_context;
	 *  } */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp;
	int error = 0;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
		error = ENOATTR;
	}
	if (!error && anp) {
		*ap->a_svpp = NFSTOV(anp);
	} else if (anp) {
		vnode_put(NFSTOV(anp));
	}
	return error;
}

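/*
 * Remove a named stream by removing the underlying named attribute.
 */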
int
nfs4_vnop_removenamedstream(
	struct vnop_removenamedstream_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  vnode_t a_svp;
	 *  const char *a_name;
	 *  int a_flags;
	 *  vfs_context_t a_context;
	 *  } */ *ap)
{
	struct nfsmount *nmp = VTONMP(ap->a_vp);
	nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
	nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/*
	 * Given that a_svp is a named stream, checking for
	 * named attribute support is largely redundant.
	 */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
}

#endif /* NAMEDSTREAMS */
#endif /* CONFIG_NFS4 */

#endif /* CONFIG_NFS_CLIENT */