]> git.saurik.com Git - apple/xnu.git/blame_incremental - bsd/nfs/nfs4_vnops.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <nfs/nfs_conf.h>
30#if CONFIG_NFS_CLIENT
31
32/*
33 * vnode op calls for NFS version 4
34 */
35#include <sys/param.h>
36#include <sys/kernel.h>
37#include <sys/systm.h>
38#include <sys/resourcevar.h>
39#include <sys/proc_internal.h>
40#include <sys/kauth.h>
41#include <sys/mount_internal.h>
42#include <sys/malloc.h>
43#include <sys/kpi_mbuf.h>
44#include <sys/conf.h>
45#include <sys/vnode_internal.h>
46#include <sys/dirent.h>
47#include <sys/fcntl.h>
48#include <sys/lockf.h>
49#include <sys/ubc_internal.h>
50#include <sys/attr.h>
51#include <sys/signalvar.h>
52#include <sys/uio_internal.h>
53#include <sys/xattr.h>
54#include <sys/paths.h>
55
56#include <vfs/vfs_support.h>
57
58#include <sys/vm.h>
59
60#include <sys/time.h>
61#include <kern/clock.h>
62#include <libkern/OSAtomic.h>
63
64#include <miscfs/fifofs/fifo.h>
65#include <miscfs/specfs/specdev.h>
66
67#include <nfs/rpcv2.h>
68#include <nfs/nfsproto.h>
69#include <nfs/nfs.h>
70#include <nfs/nfsnode.h>
71#include <nfs/nfs_gss.h>
72#include <nfs/nfsmount.h>
73#include <nfs/nfs_lock.h>
74#include <nfs/xdr_subs.h>
75#include <nfs/nfsm_subs.h>
76
77#include <net/if.h>
78#include <netinet/in.h>
79#include <netinet/in_var.h>
80#include <vm/vm_kern.h>
81
82#include <kern/task.h>
83#include <kern/sched_prim.h>
84
85#if CONFIG_NFS4
86int
87nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
88{
89 int error = 0, lockerror = ENOENT, status, numops, slot;
90 u_int64_t xid;
91 struct nfsm_chain nmreq, nmrep;
92 struct timeval now;
93 uint32_t access_result = 0, supported = 0, missing;
94 struct nfsmount *nmp = NFSTONMP(np);
95 int nfsvers = nmp->nm_vers;
96 uid_t uid;
97 struct nfsreq_secinfo_args si;
98
99 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
100 return 0;
101 }
102
103 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
104 nfsm_chain_null(&nmreq);
105 nfsm_chain_null(&nmrep);
106
107 // PUTFH, ACCESS, GETATTR
108 numops = 3;
109 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
110 nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
111 numops--;
112 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
113 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
114 numops--;
115 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
116 nfsm_chain_add_32(error, &nmreq, *access);
117 numops--;
118 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
119 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
120 nfsm_chain_build_done(error, &nmreq);
121 nfsm_assert(error, (numops == 0), EPROTO);
122 nfsmout_if(error);
123 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
124 vfs_context_thread(ctx), vfs_context_ucred(ctx),
125 &si, rpcflags, &nmrep, &xid, &status);
126
127 if ((lockerror = nfs_node_lock(np))) {
128 error = lockerror;
129 }
130 nfsm_chain_skip_tag(error, &nmrep);
131 nfsm_chain_get_32(error, &nmrep, numops);
132 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
133 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
134 nfsm_chain_get_32(error, &nmrep, supported);
135 nfsm_chain_get_32(error, &nmrep, access_result);
136 nfsmout_if(error);
137 if ((missing = (*access & ~supported))) {
138 /* missing support for something(s) we wanted */
139 if (missing & NFS_ACCESS_DELETE) {
140 /*
141 * If the server doesn't report DELETE (possible
142 * on UNIX systems), we'll assume that it is OK
143 * and just let any subsequent delete action fail
144 * if it really isn't deletable.
145 */
146 access_result |= NFS_ACCESS_DELETE;
147 }
148 }
149 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
150 if (nfs_access_dotzfs) {
151 vnode_t dvp = NULLVP;
152 if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
153 access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
154 } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
155 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
156 }
157 if (dvp != NULLVP) {
158 vnode_put(dvp);
159 }
160 }
161 /* Some servers report DELETE support but erroneously give a denied answer. */
162 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
163 access_result |= NFS_ACCESS_DELETE;
164 }
165 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
166 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
167 nfsmout_if(error);
168
169 if (nfs_mount_gone(nmp)) {
170 error = ENXIO;
171 }
172 nfsmout_if(error);
173
174 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
175 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
176 } else {
177 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
178 }
179 slot = nfs_node_access_slot(np, uid, 1);
180 np->n_accessuid[slot] = uid;
181 microuptime(&now);
182 np->n_accessstamp[slot] = now.tv_sec;
183 np->n_access[slot] = access_result;
184
185 /* pass back the access returned with this request */
186 *access = np->n_access[slot];
187nfsmout:
188 if (!lockerror) {
189 nfs_node_unlock(np);
190 }
191 nfsm_chain_cleanup(&nmreq);
192 nfsm_chain_cleanup(&nmrep);
193 return error;
194}
195
196int
197nfs4_getattr_rpc(
198 nfsnode_t np,
199 mount_t mp,
200 u_char *fhp,
201 size_t fhsize,
202 int flags,
203 vfs_context_t ctx,
204 struct nfs_vattr *nvap,
205 u_int64_t *xidp)
206{
207 struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
208 int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
209 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
210 struct nfsm_chain nmreq, nmrep;
211 struct nfsreq_secinfo_args si;
212
213 if (nfs_mount_gone(nmp)) {
214 return ENXIO;
215 }
216 nfsvers = nmp->nm_vers;
217 acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);
218
219 if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
220 nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
221 return 0;
222 }
223
224 if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
225 rpcflags = R_RECOVER;
226 }
227
228 if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
229 rpcflags |= R_SOFT;
230 }
231
232 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
233 nfsm_chain_null(&nmreq);
234 nfsm_chain_null(&nmrep);
235
236 // PUTFH, GETATTR
237 numops = 2;
238 nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
239 nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
240 numops--;
241 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
242 nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
243 numops--;
244 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
245 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
246 if ((flags & NGA_ACL) && acls) {
247 NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
248 }
249 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
250 nfsm_chain_build_done(error, &nmreq);
251 nfsm_assert(error, (numops == 0), EPROTO);
252 nfsmout_if(error);
253 error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
254 vfs_context_thread(ctx), vfs_context_ucred(ctx),
255 NULL, rpcflags, &nmrep, xidp, &status);
256
257 nfsm_chain_skip_tag(error, &nmrep);
258 nfsm_chain_get_32(error, &nmrep, numops);
259 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
260 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
261 nfsmout_if(error);
262 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
263 nfsmout_if(error);
264 if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
265 /* we asked for the ACL but didn't get one... assume there isn't one */
266 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
267 nvap->nva_acl = NULL;
268 }
269nfsmout:
270 nfsm_chain_cleanup(&nmreq);
271 nfsm_chain_cleanup(&nmrep);
272 return error;
273}
274
275int
276nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx)
277{
278 struct nfsmount *nmp;
279 int error = 0, lockerror = ENOENT, status, numops;
280 size_t len = 0;
281 u_int64_t xid;
282 struct nfsm_chain nmreq, nmrep;
283 struct nfsreq_secinfo_args si;
284
285 nmp = NFSTONMP(np);
286 if (nfs_mount_gone(nmp)) {
287 return ENXIO;
288 }
289 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
290 return EINVAL;
291 }
292 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
293 nfsm_chain_null(&nmreq);
294 nfsm_chain_null(&nmrep);
295
296 // PUTFH, GETATTR, READLINK
297 numops = 3;
298 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
299 nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
300 numops--;
301 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
302 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
303 numops--;
304 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
305 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
306 numops--;
307 nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
308 nfsm_chain_build_done(error, &nmreq);
309 nfsm_assert(error, (numops == 0), EPROTO);
310 nfsmout_if(error);
311 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
312
313 if ((lockerror = nfs_node_lock(np))) {
314 error = lockerror;
315 }
316 nfsm_chain_skip_tag(error, &nmrep);
317 nfsm_chain_get_32(error, &nmrep, numops);
318 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
319 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
320 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
321 nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
322 nfsm_chain_get_32(error, &nmrep, len);
323 nfsmout_if(error);
324 if (len >= *buflenp) {
325 if (np->n_size && (np->n_size < *buflenp)) {
326 len = np->n_size;
327 } else {
328 len = *buflenp - 1;
329 }
330 }
331 nfsm_chain_get_opaque(error, &nmrep, len, buf);
332 if (!error) {
333 *buflenp = len;
334 }
335nfsmout:
336 if (!lockerror) {
337 nfs_node_unlock(np);
338 }
339 nfsm_chain_cleanup(&nmreq);
340 nfsm_chain_cleanup(&nmrep);
341 return error;
342}
343
344int
345nfs4_read_rpc_async(
346 nfsnode_t np,
347 off_t offset,
348 size_t len,
349 thread_t thd,
350 kauth_cred_t cred,
351 struct nfsreq_cbinfo *cb,
352 struct nfsreq **reqp)
353{
354 struct nfsmount *nmp;
355 int error = 0, nfsvers, numops;
356 nfs_stateid stateid;
357 struct nfsm_chain nmreq;
358 struct nfsreq_secinfo_args si;
359
360 nmp = NFSTONMP(np);
361 if (nfs_mount_gone(nmp)) {
362 return ENXIO;
363 }
364 nfsvers = nmp->nm_vers;
365 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
366 return EINVAL;
367 }
368
369 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
370 nfsm_chain_null(&nmreq);
371
372 // PUTFH, READ
373 numops = 2;
374 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
375 nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
376 numops--;
377 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
378 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
379 numops--;
380 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
381 nfs_get_stateid(np, thd, cred, &stateid);
382 nfsm_chain_add_stateid(error, &nmreq, &stateid);
383 nfsm_chain_add_64(error, &nmreq, offset);
384 nfsm_chain_add_32(error, &nmreq, len);
385 nfsm_chain_build_done(error, &nmreq);
386 nfsm_assert(error, (numops == 0), EPROTO);
387 nfsmout_if(error);
388 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
389nfsmout:
390 nfsm_chain_cleanup(&nmreq);
391 return error;
392}
393
394int
395nfs4_read_rpc_async_finish(
396 nfsnode_t np,
397 struct nfsreq *req,
398 uio_t uio,
399 size_t *lenp,
400 int *eofp)
401{
402 struct nfsmount *nmp;
403 int error = 0, lockerror, nfsvers, numops, status, eof = 0;
404 size_t retlen = 0;
405 u_int64_t xid;
406 struct nfsm_chain nmrep;
407
408 nmp = NFSTONMP(np);
409 if (nfs_mount_gone(nmp)) {
410 nfs_request_async_cancel(req);
411 return ENXIO;
412 }
413 nfsvers = nmp->nm_vers;
414
415 nfsm_chain_null(&nmrep);
416
417 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
418 if (error == EINPROGRESS) { /* async request restarted */
419 return error;
420 }
421
422 if ((lockerror = nfs_node_lock(np))) {
423 error = lockerror;
424 }
425 nfsm_chain_skip_tag(error, &nmrep);
426 nfsm_chain_get_32(error, &nmrep, numops);
427 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
428 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
429 nfsm_chain_get_32(error, &nmrep, eof);
430 nfsm_chain_get_32(error, &nmrep, retlen);
431 if (!error) {
432 *lenp = MIN(retlen, *lenp);
433 error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
434 }
435 if (!lockerror) {
436 nfs_node_unlock(np);
437 }
438 if (eofp) {
439 if (!eof && !retlen) {
440 eof = 1;
441 }
442 *eofp = eof;
443 }
444 nfsm_chain_cleanup(&nmrep);
445 if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
446 microuptime(&np->n_lastio);
447 }
448 return error;
449}
450
451int
452nfs4_write_rpc_async(
453 nfsnode_t np,
454 uio_t uio,
455 size_t len,
456 thread_t thd,
457 kauth_cred_t cred,
458 int iomode,
459 struct nfsreq_cbinfo *cb,
460 struct nfsreq **reqp)
461{
462 struct nfsmount *nmp;
463 mount_t mp;
464 int error = 0, nfsvers, numops;
465 nfs_stateid stateid;
466 struct nfsm_chain nmreq;
467 struct nfsreq_secinfo_args si;
468
469 nmp = NFSTONMP(np);
470 if (nfs_mount_gone(nmp)) {
471 return ENXIO;
472 }
473 nfsvers = nmp->nm_vers;
474 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
475 return EINVAL;
476 }
477
478 /* for async mounts, don't bother sending sync write requests */
479 if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
480 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
481 iomode = NFS_WRITE_UNSTABLE;
482 }
483
484 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
485 nfsm_chain_null(&nmreq);
486
487 // PUTFH, WRITE, GETATTR
488 numops = 3;
489 nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
490 nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
491 numops--;
492 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
493 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
494 numops--;
495 nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
496 nfs_get_stateid(np, thd, cred, &stateid);
497 nfsm_chain_add_stateid(error, &nmreq, &stateid);
498 nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
499 nfsm_chain_add_32(error, &nmreq, iomode);
500 nfsm_chain_add_32(error, &nmreq, len);
501 if (!error) {
502 error = nfsm_chain_add_uio(&nmreq, uio, len);
503 }
504 numops--;
505 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
506 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np);
507 nfsm_chain_build_done(error, &nmreq);
508 nfsm_assert(error, (numops == 0), EPROTO);
509 nfsmout_if(error);
510
511 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
512nfsmout:
513 nfsm_chain_cleanup(&nmreq);
514 return error;
515}
516
517int
518nfs4_write_rpc_async_finish(
519 nfsnode_t np,
520 struct nfsreq *req,
521 int *iomodep,
522 size_t *rlenp,
523 uint64_t *wverfp)
524{
525 struct nfsmount *nmp;
526 int error = 0, lockerror = ENOENT, nfsvers, numops, status;
527 int committed = NFS_WRITE_FILESYNC;
528 size_t rlen = 0;
529 u_int64_t xid, wverf;
530 mount_t mp;
531 struct nfsm_chain nmrep;
532
533 nmp = NFSTONMP(np);
534 if (nfs_mount_gone(nmp)) {
535 nfs_request_async_cancel(req);
536 return ENXIO;
537 }
538 nfsvers = nmp->nm_vers;
539
540 nfsm_chain_null(&nmrep);
541
542 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
543 if (error == EINPROGRESS) { /* async request restarted */
544 return error;
545 }
546 nmp = NFSTONMP(np);
547 if (nfs_mount_gone(nmp)) {
548 error = ENXIO;
549 }
550 if (!error && (lockerror = nfs_node_lock(np))) {
551 error = lockerror;
552 }
553 nfsm_chain_skip_tag(error, &nmrep);
554 nfsm_chain_get_32(error, &nmrep, numops);
555 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
556 nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
557 nfsm_chain_get_32(error, &nmrep, rlen);
558 nfsmout_if(error);
559 *rlenp = rlen;
560 if (rlen <= 0) {
561 error = NFSERR_IO;
562 }
563 nfsm_chain_get_32(error, &nmrep, committed);
564 nfsm_chain_get_64(error, &nmrep, wverf);
565 nfsmout_if(error);
566 if (wverfp) {
567 *wverfp = wverf;
568 }
569 lck_mtx_lock(&nmp->nm_lock);
570 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
571 nmp->nm_verf = wverf;
572 nmp->nm_state |= NFSSTA_HASWRITEVERF;
573 } else if (nmp->nm_verf != wverf) {
574 nmp->nm_verf = wverf;
575 }
576 lck_mtx_unlock(&nmp->nm_lock);
577 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
578
579 /*
580 * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
581 * In such cases, we do not update the time stamp - but the requested attributes.
582 */
583 np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE;
584 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
585 np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE;
586
587nfsmout:
588 if (!lockerror) {
589 nfs_node_unlock(np);
590 }
591 nfsm_chain_cleanup(&nmrep);
592 if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
593 ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
594 committed = NFS_WRITE_FILESYNC;
595 }
596 *iomodep = committed;
597 if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
598 microuptime(&np->n_lastio);
599 }
600 return error;
601}
602
603int
604nfs4_remove_rpc(
605 nfsnode_t dnp,
606 char *name,
607 int namelen,
608 thread_t thd,
609 kauth_cred_t cred)
610{
611 int error = 0, lockerror = ENOENT, remove_error = 0, status;
612 struct nfsmount *nmp;
613 int nfsvers, numops;
614 u_int64_t xid;
615 struct nfsm_chain nmreq, nmrep;
616 struct nfsreq_secinfo_args si;
617
618 nmp = NFSTONMP(dnp);
619 if (nfs_mount_gone(nmp)) {
620 return ENXIO;
621 }
622 nfsvers = nmp->nm_vers;
623 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
624 return EINVAL;
625 }
626 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
627restart:
628 nfsm_chain_null(&nmreq);
629 nfsm_chain_null(&nmrep);
630
631 // PUTFH, REMOVE, GETATTR
632 numops = 3;
633 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
634 nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
635 numops--;
636 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
637 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
638 numops--;
639 nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
640 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
641 numops--;
642 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
643 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
644 nfsm_chain_build_done(error, &nmreq);
645 nfsm_assert(error, (numops == 0), EPROTO);
646 nfsmout_if(error);
647
648 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);
649
650 if ((lockerror = nfs_node_lock(dnp))) {
651 error = lockerror;
652 }
653 nfsm_chain_skip_tag(error, &nmrep);
654 nfsm_chain_get_32(error, &nmrep, numops);
655 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
656 nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
657 remove_error = error;
658 nfsm_chain_check_change_info(error, &nmrep, dnp);
659 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
660 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
661 if (error && !lockerror) {
662 NATTRINVALIDATE(dnp);
663 }
664nfsmout:
665 nfsm_chain_cleanup(&nmreq);
666 nfsm_chain_cleanup(&nmrep);
667
668 if (!lockerror) {
669 dnp->n_flag |= NMODIFIED;
670 nfs_node_unlock(dnp);
671 }
672 if (error == NFSERR_GRACE) {
673 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
674 goto restart;
675 }
676
677 return remove_error;
678}
679
680int
681nfs4_rename_rpc(
682 nfsnode_t fdnp,
683 char *fnameptr,
684 int fnamelen,
685 nfsnode_t tdnp,
686 char *tnameptr,
687 int tnamelen,
688 vfs_context_t ctx)
689{
690 int error = 0, lockerror = ENOENT, status, nfsvers, numops;
691 struct nfsmount *nmp;
692 u_int64_t xid, savedxid;
693 struct nfsm_chain nmreq, nmrep;
694 struct nfsreq_secinfo_args si;
695
696 nmp = NFSTONMP(fdnp);
697 if (nfs_mount_gone(nmp)) {
698 return ENXIO;
699 }
700 nfsvers = nmp->nm_vers;
701 if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
702 return EINVAL;
703 }
704 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
705 return EINVAL;
706 }
707
708 NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
709 nfsm_chain_null(&nmreq);
710 nfsm_chain_null(&nmrep);
711
712 // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
713 numops = 7;
714 nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
715 nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
716 numops--;
717 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
718 nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
719 numops--;
720 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
721 numops--;
722 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
723 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
724 numops--;
725 nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
726 nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
727 nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
728 numops--;
729 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
730 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
731 numops--;
732 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
733 numops--;
734 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
735 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
736 nfsm_chain_build_done(error, &nmreq);
737 nfsm_assert(error, (numops == 0), EPROTO);
738 nfsmout_if(error);
739
740 error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
741
742 if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
743 error = lockerror;
744 }
745 nfsm_chain_skip_tag(error, &nmrep);
746 nfsm_chain_get_32(error, &nmrep, numops);
747 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
748 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
749 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
750 nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
751 nfsm_chain_check_change_info(error, &nmrep, fdnp);
752 nfsm_chain_check_change_info(error, &nmrep, tdnp);
753 /* directory attributes: if we don't get them, make sure to invalidate */
754 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
755 savedxid = xid;
756 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
757 if (error && !lockerror) {
758 NATTRINVALIDATE(tdnp);
759 }
760 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
761 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
762 xid = savedxid;
763 nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
764 if (error && !lockerror) {
765 NATTRINVALIDATE(fdnp);
766 }
767nfsmout:
768 nfsm_chain_cleanup(&nmreq);
769 nfsm_chain_cleanup(&nmrep);
770 if (!lockerror) {
771 fdnp->n_flag |= NMODIFIED;
772 tdnp->n_flag |= NMODIFIED;
773 nfs_node_unlock2(fdnp, tdnp);
774 }
775 return error;
776}
777
778/*
779 * NFS V4 readdir RPC.
780 */
781int
782nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
783{
784 struct nfsmount *nmp;
785 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
786 int i, status, more_entries = 1, eof, bp_dropped = 0;
787 uint16_t namlen, reclen;
788 uint32_t nmreaddirsize, nmrsize;
789 uint32_t namlen32, skiplen, fhlen, xlen, attrlen;
790 uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed;
791 struct nfsm_chain nmreq, nmrep, nmrepsave;
792 fhandle_t *fh;
793 struct nfs_vattr *nvattr, *nvattrp;
794 struct nfs_dir_buf_header *ndbhp;
795 struct direntry *dp;
796 char *padstart;
797 const char *tag;
798 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
799 struct timeval now;
800 struct nfsreq_secinfo_args si;
801
802 nmp = NFSTONMP(dnp);
803 if (nfs_mount_gone(nmp)) {
804 return ENXIO;
805 }
806 nfsvers = nmp->nm_vers;
807 nmreaddirsize = nmp->nm_readdirsize;
808 nmrsize = nmp->nm_rsize;
809 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
810 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
811 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
812 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
813 return EINVAL;
814 }
815 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
816
817 /*
818 * Set up attribute request for entries.
819 * For READDIRPLUS functionality, get everything.
820 * Otherwise, just get what we need for struct direntry.
821 */
822 if (rdirplus) {
823 tag = "readdirplus";
824 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
825 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
826 } else {
827 tag = "readdir";
828 NFS_CLEAR_ATTRIBUTES(entry_attrs);
829 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
830 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
831 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
832 }
833 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
834
835 /* lock to protect access to cookie verifier */
836 if ((lockerror = nfs_node_lock(dnp))) {
837 return lockerror;
838 }
839
840 fh = zalloc(nfs_fhandle_zone);
841 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
842
843 /* determine cookie to use, and move dp to the right offset */
844 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
845 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
846 if (ndbhp->ndbh_count) {
847 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
848 dp = NFS_DIRENTRY_NEXT(dp);
849 }
850 cookie = dp->d_seekoff;
851 dp = NFS_DIRENTRY_NEXT(dp);
852 } else {
853 cookie = bp->nb_lblkno;
854 /* increment with every buffer read */
855 OSAddAtomic64(1, &nfsstats.readdir_bios);
856 }
857 lastcookie = cookie;
858
859 /*
860 * The NFS client is responsible for the "." and ".." entries in the
861 * directory. So, we put them at the start of the first buffer.
862 * Don't bother for attribute directories.
863 */
864 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
865 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
866 fh->fh_len = 0;
867 fhlen = rdirplus ? fh->fh_len + 1 : 0;
868 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
869 /* "." */
870 namlen = 1;
871 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
872 if (xlen) {
873 bzero(&dp->d_name[namlen + 1], xlen);
874 }
875 dp->d_namlen = namlen;
876 strlcpy(dp->d_name, ".", namlen + 1);
877 dp->d_fileno = dnp->n_vattr.nva_fileid;
878 dp->d_type = DT_DIR;
879 dp->d_reclen = reclen;
880 dp->d_seekoff = 1;
881 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
882 dp = NFS_DIRENTRY_NEXT(dp);
883 padlen = (char*)dp - padstart;
884 if (padlen > 0) {
885 bzero(padstart, padlen);
886 }
887 if (rdirplus) { /* zero out attributes */
888 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
889 }
890
891 /* ".." */
892 namlen = 2;
893 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
894 if (xlen) {
895 bzero(&dp->d_name[namlen + 1], xlen);
896 }
897 dp->d_namlen = namlen;
898 strlcpy(dp->d_name, "..", namlen + 1);
899 if (dnp->n_parent) {
900 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
901 } else {
902 dp->d_fileno = dnp->n_vattr.nva_fileid;
903 }
904 dp->d_type = DT_DIR;
905 dp->d_reclen = reclen;
906 dp->d_seekoff = 2;
907 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
908 dp = NFS_DIRENTRY_NEXT(dp);
909 padlen = (char*)dp - padstart;
910 if (padlen > 0) {
911 bzero(padstart, padlen);
912 }
913 if (rdirplus) { /* zero out attributes */
914 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
915 }
916
917 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
918 ndbhp->ndbh_count = 2;
919 }
920
921 /*
922 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
923 * the buffer is full (or we hit EOF). Then put the remainder of the
924 * results in the next buffer(s).
925 */
926 nfsm_chain_null(&nmreq);
927 nfsm_chain_null(&nmrep);
928 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
929 // PUTFH, GETATTR, READDIR
930 numops = 3;
931 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
932 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
933 numops--;
934 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
935 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
936 numops--;
937 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
938 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
939 numops--;
940 nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
941 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
942 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
943 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
944 nfsm_chain_add_32(error, &nmreq, nmrsize);
945 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
946 nfsm_chain_build_done(error, &nmreq);
947 nfsm_assert(error, (numops == 0), EPROTO);
948 nfs_node_unlock(dnp);
949 nfsmout_if(error);
950 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
951
952 if ((lockerror = nfs_node_lock(dnp))) {
953 error = lockerror;
954 }
955
956 savedxid = xid;
957 nfsm_chain_skip_tag(error, &nmrep);
958 nfsm_chain_get_32(error, &nmrep, numops);
959 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
960 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
961 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
962 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
963 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
964 nfsm_chain_get_32(error, &nmrep, more_entries);
965
966 if (!lockerror) {
967 nfs_node_unlock(dnp);
968 lockerror = ENOENT;
969 }
970 nfsmout_if(error);
971
972 if (rdirplus) {
973 microuptime(&now);
974 if (lastcookie == 0) {
975 dnp->n_rdirplusstamp_sof = now.tv_sec;
976 dnp->n_rdirplusstamp_eof = 0;
977 }
978 }
979
980 /* loop through the entries packing them into the buffer */
981 while (more_entries) {
982 /* Entry: COOKIE, NAME, FATTR */
983 nfsm_chain_get_64(error, &nmrep, cookie);
984 nfsm_chain_get_32(error, &nmrep, namlen32);
985 if (namlen32 > UINT16_MAX) {
986 error = EBADRPC;
987 goto nfsmout;
988 }
989 namlen = (uint16_t)namlen32;
990 nfsmout_if(error);
991 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
992 /* we've got a big cookie, make sure flag is set */
993 lck_mtx_lock(&nmp->nm_lock);
994 nmp->nm_state |= NFSSTA_BIGCOOKIES;
995 lck_mtx_unlock(&nmp->nm_lock);
996 bigcookies = 1;
997 }
998 /* just truncate names that don't fit in direntry.d_name */
999 if (namlen <= 0) {
1000 error = EBADRPC;
1001 goto nfsmout;
1002 }
1003 if (namlen > (sizeof(dp->d_name) - 1)) {
1004 skiplen = namlen - sizeof(dp->d_name) + 1;
1005 namlen = sizeof(dp->d_name) - 1;
1006 } else {
1007 skiplen = 0;
1008 }
1009 /* guess that fh size will be same as parent */
1010 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
1011 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
1012 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
1013 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1014 space_needed = reclen + attrlen;
1015 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1016 if (space_needed > space_free) {
1017 /*
1018 * We still have entries to pack, but we've
1019 * run out of room in the current buffer.
1020 * So we need to move to the next buffer.
1021 * The block# for the next buffer is the
1022 * last cookie in the current buffer.
1023 */
1024nextbuffer:
1025 ndbhp->ndbh_flags |= NDB_FULL;
1026 nfs_buf_release(bp, 0);
1027 bp_dropped = 1;
1028 bp = NULL;
1029 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
1030 nfsmout_if(error);
1031 /* initialize buffer */
1032 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
1033 ndbhp->ndbh_flags = 0;
1034 ndbhp->ndbh_count = 0;
1035 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
1036 ndbhp->ndbh_ncgen = dnp->n_ncgen;
1037 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1038 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
1039 /* increment with every buffer read */
1040 OSAddAtomic64(1, &nfsstats.readdir_bios);
1041 }
1042 nmrepsave = nmrep;
1043 dp->d_fileno = cookie; /* placeholder */
1044 dp->d_seekoff = cookie;
1045 dp->d_namlen = namlen;
1046 dp->d_reclen = reclen;
1047 dp->d_type = DT_UNKNOWN;
1048 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
1049 nfsmout_if(error);
1050 dp->d_name[namlen] = '\0';
1051 if (skiplen) {
1052 nfsm_chain_adv(error, &nmrep,
1053 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
1054 }
1055 nfsmout_if(error);
1056 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr;
1057 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL);
1058 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
1059 /* we do NOT want ACLs returned to us here */
1060 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
1061 if (nvattrp->nva_acl) {
1062 kauth_acl_free(nvattrp->nva_acl);
1063 nvattrp->nva_acl = NULL;
1064 }
1065 }
1066 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
1067 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1068 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1069 /* set this up to look like a referral trigger */
1070 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh);
1071 }
1072 error = 0;
1073 }
1074 /* check for more entries after this one */
1075 nfsm_chain_get_32(error, &nmrep, more_entries);
1076 nfsmout_if(error);
1077
1078 /* Skip any "." and ".." entries returned from server. */
1079 /* Also skip any bothersome named attribute entries. */
1080 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
1081 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
1082 lastcookie = cookie;
1083 continue;
1084 }
1085
1086 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
1087 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
1088 }
1089 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
1090 dp->d_fileno = nvattrp->nva_fileid;
1091 }
1092 if (rdirplus) {
1093 /* fileid is already in d_fileno, so stash xid in attrs */
1094 nvattrp->nva_fileid = savedxid;
1095 nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID;
1096 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1097 fhlen = fh->fh_len + 1;
1098 xlen = fhlen + sizeof(time_t);
1099 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1100 space_needed = reclen + attrlen;
1101 if (space_needed > space_free) {
1102 /* didn't actually have the room... move on to next buffer */
1103 nmrep = nmrepsave;
1104 goto nextbuffer;
1105 }
1106 /* pack the file handle into the record */
1107 dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
1108 bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len);
1109 } else {
1110 /* mark the file handle invalid */
1111 fh->fh_len = 0;
1112 fhlen = fh->fh_len + 1;
1113 xlen = fhlen + sizeof(time_t);
1114 reclen = NFS_DIRENTRY_LEN_16(namlen + xlen);
1115 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
1116 }
1117 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
1118 dp->d_reclen = reclen;
1119 nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid);
1120 }
1121 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1122 ndbhp->ndbh_count++;
1123 lastcookie = cookie;
1124
1125 /* advance to next direntry in buffer */
1126 dp = NFS_DIRENTRY_NEXT(dp);
1127 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1128 /* zero out the pad bytes */
1129 padlen = (char*)dp - padstart;
1130 if (padlen > 0) {
1131 bzero(padstart, padlen);
1132 }
1133 }
1134 /* Finally, get the eof boolean */
1135 nfsm_chain_get_32(error, &nmrep, eof);
1136 nfsmout_if(error);
1137 if (eof) {
1138 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
1139 nfs_node_lock_force(dnp);
1140 dnp->n_eofcookie = lastcookie;
1141 if (rdirplus) {
1142 dnp->n_rdirplusstamp_eof = now.tv_sec;
1143 }
1144 nfs_node_unlock(dnp);
1145 } else {
1146 more_entries = 1;
1147 }
1148 if (bp_dropped) {
1149 nfs_buf_release(bp, 0);
1150 bp = NULL;
1151 break;
1152 }
1153 if ((lockerror = nfs_node_lock(dnp))) {
1154 error = lockerror;
1155 }
1156 nfsmout_if(error);
1157 nfsm_chain_cleanup(&nmrep);
1158 nfsm_chain_null(&nmreq);
1159 }
1160nfsmout:
1161 if (bp_dropped && bp) {
1162 nfs_buf_release(bp, 0);
1163 }
1164 if (!lockerror) {
1165 nfs_node_unlock(dnp);
1166 }
1167 nfsm_chain_cleanup(&nmreq);
1168 nfsm_chain_cleanup(&nmrep);
1169 NFS_ZFREE(nfs_fhandle_zone, fh);
1170 FREE(nvattr, M_TEMP);
1171 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
1172}
1173
1174int
1175nfs4_lookup_rpc_async(
1176 nfsnode_t dnp,
1177 char *name,
1178 int namelen,
1179 vfs_context_t ctx,
1180 struct nfsreq **reqp)
1181{
1182 int error = 0, isdotdot = 0, nfsvers, numops;
1183 struct nfsm_chain nmreq;
1184 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
1185 struct nfsmount *nmp;
1186 struct nfsreq_secinfo_args si;
1187
1188 nmp = NFSTONMP(dnp);
1189 if (nfs_mount_gone(nmp)) {
1190 return ENXIO;
1191 }
1192 nfsvers = nmp->nm_vers;
1193 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1194 return EINVAL;
1195 }
1196
1197 if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
1198 isdotdot = 1;
1199 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
1200 } else {
1201 NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
1202 }
1203
1204 nfsm_chain_null(&nmreq);
1205
1206 // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
1207 numops = 5;
1208 nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
1209 nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
1210 numops--;
1211 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1212 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
1213 numops--;
1214 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1215 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
1216 numops--;
1217 if (isdotdot) {
1218 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
1219 } else {
1220 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
1221 nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
1222 }
1223 numops--;
1224 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
1225 numops--;
1226 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1227 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
1228 /* some ".zfs" directories can't handle being asked for some attributes */
1229 if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
1230 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1231 }
1232 if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
1233 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1234 }
1235 if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
1236 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1237 }
1238 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
1239 nfsm_chain_build_done(error, &nmreq);
1240 nfsm_assert(error, (numops == 0), EPROTO);
1241 nfsmout_if(error);
1242 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
1243 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
1244nfsmout:
1245 nfsm_chain_cleanup(&nmreq);
1246 return error;
1247}
1248
1249
1250int
1251nfs4_lookup_rpc_async_finish(
1252 nfsnode_t dnp,
1253 char *name,
1254 int namelen,
1255 vfs_context_t ctx,
1256 struct nfsreq *req,
1257 u_int64_t *xidp,
1258 fhandle_t *fhp,
1259 struct nfs_vattr *nvap)
1260{
1261 int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
1262 uint32_t op = NFS_OP_LOOKUP;
1263 u_int64_t xid;
1264 struct nfsmount *nmp;
1265 struct nfsm_chain nmrep;
1266
1267 nmp = NFSTONMP(dnp);
1268 if (nmp == NULL) {
1269 return ENXIO;
1270 }
1271 nfsvers = nmp->nm_vers;
1272 if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
1273 isdotdot = 1;
1274 }
1275
1276 nfsm_chain_null(&nmrep);
1277
1278 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1279
1280 if ((lockerror = nfs_node_lock(dnp))) {
1281 error = lockerror;
1282 }
1283 nfsm_chain_skip_tag(error, &nmrep);
1284 nfsm_chain_get_32(error, &nmrep, numops);
1285 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1286 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1287 if (xidp) {
1288 *xidp = xid;
1289 }
1290 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
1291
1292 nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
1293 nfsmout_if(error || !fhp || !nvap);
1294 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
1295 nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
1296 if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
1297 error = EBADRPC;
1298 }
1299 nfsmout_if(error);
1300 nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
1301 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1302 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1303 /* set this up to look like a referral trigger */
1304 nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
1305 error = 0;
1306 } else {
1307 nfsmout_if(error);
1308 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
1309 }
1310nfsmout:
1311 if (!lockerror) {
1312 nfs_node_unlock(dnp);
1313 }
1314 nfsm_chain_cleanup(&nmrep);
1315 if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
1316 /* We still need to get SECINFO to set default for mount. */
1317 /* Do so for the first LOOKUP that returns successfully. */
1318 struct nfs_sec sec;
1319
1320 sec.count = NX_MAX_SEC_FLAVORS;
1321 error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
1322 /* [sigh] some implementations return "illegal" error for unsupported ops */
1323 if (error == NFSERR_OP_ILLEGAL) {
1324 error = 0;
1325 }
1326 if (!error) {
1327 /* set our default security flavor to the first in the list */
1328 lck_mtx_lock(&nmp->nm_lock);
1329 if (sec.count) {
1330 nmp->nm_auth = sec.flavors[0];
1331 }
1332 nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
1333 lck_mtx_unlock(&nmp->nm_lock);
1334 }
1335 }
1336 return error;
1337}
1338
1339int
1340nfs4_commit_rpc(
1341 nfsnode_t np,
1342 uint64_t offset,
1343 uint64_t count,
1344 kauth_cred_t cred,
1345 uint64_t wverf)
1346{
1347 struct nfsmount *nmp;
1348 int error = 0, lockerror, status, nfsvers, numops;
1349 u_int64_t xid, newwverf;
1350 uint32_t count32;
1351 struct nfsm_chain nmreq, nmrep;
1352 struct nfsreq_secinfo_args si;
1353
1354 nmp = NFSTONMP(np);
1355 FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
1356 if (nfs_mount_gone(nmp)) {
1357 return ENXIO;
1358 }
1359 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1360 return EINVAL;
1361 }
1362 if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
1363 return 0;
1364 }
1365 nfsvers = nmp->nm_vers;
1366 count32 = count > UINT32_MAX ? 0 : (uint32_t)count;
1367
1368 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1369 nfsm_chain_null(&nmreq);
1370 nfsm_chain_null(&nmrep);
1371
1372 // PUTFH, COMMIT, GETATTR
1373 numops = 3;
1374 nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
1375 nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
1376 numops--;
1377 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1378 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1379 numops--;
1380 nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
1381 nfsm_chain_add_64(error, &nmreq, offset);
1382 nfsm_chain_add_32(error, &nmreq, count32);
1383 numops--;
1384 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1385 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
1386 nfsm_chain_build_done(error, &nmreq);
1387 nfsm_assert(error, (numops == 0), EPROTO);
1388 nfsmout_if(error);
1389 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
1390 current_thread(), cred, &si, 0, &nmrep, &xid, &status);
1391
1392 if ((lockerror = nfs_node_lock(np))) {
1393 error = lockerror;
1394 }
1395 nfsm_chain_skip_tag(error, &nmrep);
1396 nfsm_chain_get_32(error, &nmrep, numops);
1397 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1398 nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
1399 nfsm_chain_get_64(error, &nmrep, newwverf);
1400 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1401 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1402 if (!lockerror) {
1403 nfs_node_unlock(np);
1404 }
1405 nfsmout_if(error);
1406 lck_mtx_lock(&nmp->nm_lock);
1407 if (nmp->nm_verf != newwverf) {
1408 nmp->nm_verf = newwverf;
1409 }
1410 if (wverf != newwverf) {
1411 error = NFSERR_STALEWRITEVERF;
1412 }
1413 lck_mtx_unlock(&nmp->nm_lock);
1414nfsmout:
1415 nfsm_chain_cleanup(&nmreq);
1416 nfsm_chain_cleanup(&nmrep);
1417 return error;
1418}
1419
1420int
1421nfs4_pathconf_rpc(
1422 nfsnode_t np,
1423 struct nfs_fsattr *nfsap,
1424 vfs_context_t ctx)
1425{
1426 u_int64_t xid;
1427 int error = 0, lockerror, status, nfsvers, numops;
1428 struct nfsm_chain nmreq, nmrep;
1429 struct nfsmount *nmp = NFSTONMP(np);
1430 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
1431 struct nfs_vattr *nvattr;
1432 struct nfsreq_secinfo_args si;
1433
1434 if (nfs_mount_gone(nmp)) {
1435 return ENXIO;
1436 }
1437 nfsvers = nmp->nm_vers;
1438 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1439 return EINVAL;
1440 }
1441
1442 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1443 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
1444 NVATTR_INIT(nvattr);
1445 nfsm_chain_null(&nmreq);
1446 nfsm_chain_null(&nmrep);
1447
1448 /* NFSv4: fetch "pathconf" info for this node */
1449 // PUTFH, GETATTR
1450 numops = 2;
1451 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
1452 nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
1453 numops--;
1454 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1455 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1456 numops--;
1457 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1458 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
1459 NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
1460 NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
1461 NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
1462 NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
1463 NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
1464 NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
1465 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
1466 nfsm_chain_build_done(error, &nmreq);
1467 nfsm_assert(error, (numops == 0), EPROTO);
1468 nfsmout_if(error);
1469 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1470
1471 nfsm_chain_skip_tag(error, &nmrep);
1472 nfsm_chain_get_32(error, &nmrep, numops);
1473 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1474 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1475 nfsmout_if(error);
1476 error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL);
1477 nfsmout_if(error);
1478 if ((lockerror = nfs_node_lock(np))) {
1479 error = lockerror;
1480 }
1481 if (!error) {
1482 nfs_loadattrcache(np, nvattr, &xid, 0);
1483 }
1484 if (!lockerror) {
1485 nfs_node_unlock(np);
1486 }
1487nfsmout:
1488 NVATTR_CLEANUP(nvattr);
1489 FREE(nvattr, M_TEMP);
1490 nfsm_chain_cleanup(&nmreq);
1491 nfsm_chain_cleanup(&nmrep);
1492 return error;
1493}
1494
1495int
1496nfs4_vnop_getattr(
1497 struct vnop_getattr_args /* {
1498 * struct vnodeop_desc *a_desc;
1499 * vnode_t a_vp;
1500 * struct vnode_attr *a_vap;
1501 * vfs_context_t a_context;
1502 * } */*ap)
1503{
1504 struct vnode_attr *vap = ap->a_vap;
1505 struct nfsmount *nmp;
1506 struct nfs_vattr *nva;
1507 int error, acls, ngaflags;
1508
1509 nmp = VTONMP(ap->a_vp);
1510 if (nfs_mount_gone(nmp)) {
1511 return ENXIO;
1512 }
1513 acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);
1514
1515 ngaflags = NGA_CACHED;
1516 if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
1517 ngaflags |= NGA_ACL;
1518 }
1519 MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK);
1520 error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags);
1521 if (error) {
1522 goto out;
1523 }
1524
1525 /* copy what we have in nva to *a_vap */
1526 if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) {
1527 dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2);
1528 VATTR_RETURN(vap, va_rdev, rdev);
1529 }
1530 if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
1531 VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
1532 }
1533 if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) {
1534 VATTR_RETURN(vap, va_data_size, nva->nva_size);
1535 }
1536 // VATTR_RETURN(vap, va_data_alloc, ???);
1537 // VATTR_RETURN(vap, va_total_size, ???);
1538 if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) {
1539 VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes);
1540 }
1541 if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
1542 VATTR_RETURN(vap, va_uid, nva->nva_uid);
1543 }
1544 if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) {
1545 VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid);
1546 }
1547 if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
1548 VATTR_RETURN(vap, va_gid, nva->nva_gid);
1549 }
1550 if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
1551 VATTR_RETURN(vap, va_guuid, nva->nva_guuid);
1552 }
1553 if (VATTR_IS_ACTIVE(vap, va_mode)) {
1554 if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) {
1555 VATTR_RETURN(vap, va_mode, ACCESSPERMS);
1556 } else {
1557 VATTR_RETURN(vap, va_mode, nva->nva_mode);
1558 }
1559 }
1560 if (VATTR_IS_ACTIVE(vap, va_flags) &&
1561 (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) ||
1562 NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) ||
1563 (nva->nva_flags & NFS_FFLAG_TRIGGER))) {
1564 uint32_t flags = 0;
1565 if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) &&
1566 (nva->nva_flags & NFS_FFLAG_ARCHIVED)) {
1567 flags |= SF_ARCHIVED;
1568 }
1569 if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) &&
1570 (nva->nva_flags & NFS_FFLAG_HIDDEN)) {
1571 flags |= UF_HIDDEN;
1572 }
1573 VATTR_RETURN(vap, va_flags, flags);
1574 }
1575 if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) {
1576 vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE];
1577 vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE];
1578 VATTR_SET_SUPPORTED(vap, va_create_time);
1579 }
1580 if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
1581 vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS];
1582 vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS];
1583 VATTR_SET_SUPPORTED(vap, va_access_time);
1584 }
1585 if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
1586 vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY];
1587 vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY];
1588 VATTR_SET_SUPPORTED(vap, va_modify_time);
1589 }
1590 if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) {
1591 vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE];
1592 vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE];
1593 VATTR_SET_SUPPORTED(vap, va_change_time);
1594 }
1595 if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
1596 vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP];
1597 vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP];
1598 VATTR_SET_SUPPORTED(vap, va_backup_time);
1599 }
1600 if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) {
1601 VATTR_RETURN(vap, va_fileid, nva->nva_fileid);
1602 }
1603 if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) {
1604 VATTR_RETURN(vap, va_type, nva->nva_type);
1605 }
1606 if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) {
1607 VATTR_RETURN(vap, va_filerev, nva->nva_change);
1608 }
1609
1610 if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
1611 VATTR_RETURN(vap, va_acl, nva->nva_acl);
1612 nva->nva_acl = NULL;
1613 }
1614
1615 // other attrs we might support someday:
1616 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1617
1618 NVATTR_CLEANUP(nva);
1619out:
1620 FREE(nva, M_TEMP);
1621 return error;
1622}
1623
1624int
1625nfs4_setattr_rpc(
1626 nfsnode_t np,
1627 struct vnode_attr *vap,
1628 vfs_context_t ctx)
1629{
1630 struct nfsmount *nmp = NFSTONMP(np);
1631 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
1632 u_int64_t xid, nextxid;
1633 struct nfsm_chain nmreq, nmrep;
1634 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
1635 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1636 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
1637 nfs_stateid stateid;
1638 struct nfsreq_secinfo_args si;
1639
1640 if (nfs_mount_gone(nmp)) {
1641 return ENXIO;
1642 }
1643 nfsvers = nmp->nm_vers;
1644 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1645 return EINVAL;
1646 }
1647
1648 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
1649 /* we don't support setting unsupported flags (duh!) */
1650 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1651 return EINVAL; /* return EINVAL if other attributes also set */
1652 } else {
1653 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1654 }
1655 }
1656
1657 /* don't bother requesting some changes if they don't look like they are changing */
1658 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
1659 VATTR_CLEAR_ACTIVE(vap, va_uid);
1660 }
1661 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
1662 VATTR_CLEAR_ACTIVE(vap, va_gid);
1663 }
1664 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
1665 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1666 }
1667 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
1668 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1669 }
1670
1671tryagain:
1672 /* do nothing if no attributes will be sent */
1673 nfs_vattr_set_bitmap(nmp, bitmap, vap);
1674 if (!bitmap[0] && !bitmap[1]) {
1675 return 0;
1676 }
1677
1678 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1679 nfsm_chain_null(&nmreq);
1680 nfsm_chain_null(&nmrep);
1681
1682 /*
1683 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1684 * need to invalidate any cached ACL. And if we had an ACL cached,
1685 * we might as well also fetch the new value.
1686 */
1687 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1688 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1689 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
1690 if (NACLVALID(np)) {
1691 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
1692 }
1693 NACLINVALIDATE(np);
1694 }
1695
1696 // PUTFH, SETATTR, GETATTR
1697 numops = 3;
1698 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
1699 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
1700 numops--;
1701 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1702 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1703 numops--;
1704 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
1705 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1706 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1707 } else {
1708 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1709 }
1710 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1711 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1712 numops--;
1713 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1714 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1715 nfsm_chain_build_done(error, &nmreq);
1716 nfsm_assert(error, (numops == 0), EPROTO);
1717 nfsmout_if(error);
1718 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1719
1720 if ((lockerror = nfs_node_lock(np))) {
1721 error = lockerror;
1722 }
1723 nfsm_chain_skip_tag(error, &nmrep);
1724 nfsm_chain_get_32(error, &nmrep, numops);
1725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1726 nfsmout_if(error);
1727 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1728 nfsmout_if(error == EBADRPC);
1729 setattr_error = error;
1730 error = 0;
1731 bmlen = NFS_ATTR_BITMAP_LEN;
1732 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1733 if (!error) {
1734 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
1735 microuptime(&np->n_lastio);
1736 }
1737 nfs_vattr_set_supported(setbitmap, vap);
1738 error = setattr_error;
1739 }
1740 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1741 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1742 if (error) {
1743 NATTRINVALIDATE(np);
1744 }
1745 /*
1746 * We just changed the attributes and we want to make sure that we
1747 * see the latest attributes. Get the next XID. If it's not the
1748 * next XID after the SETATTR XID, then it's possible that another
1749 * RPC was in flight at the same time and it might put stale attributes
1750 * in the cache. In that case, we invalidate the attributes and set
1751 * the attribute cache XID to guarantee that newer attributes will
1752 * get loaded next.
1753 */
1754 nextxid = 0;
1755 nfs_get_xid(&nextxid);
1756 if (nextxid != (xid + 1)) {
1757 np->n_xid = nextxid;
1758 NATTRINVALIDATE(np);
1759 }
1760nfsmout:
1761 if (!lockerror) {
1762 nfs_node_unlock(np);
1763 }
1764 nfsm_chain_cleanup(&nmreq);
1765 nfsm_chain_cleanup(&nmrep);
1766 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1767 /*
1768 * Some servers may not like ACL/mode combos that get sent.
1769 * If it looks like that's what the server choked on, try setting
1770 * just the ACL and not the mode (unless it looks like everything
1771 * but mode was already successfully set).
1772 */
1773 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1774 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
1775 VATTR_CLEAR_ACTIVE(vap, va_mode);
1776 error = 0;
1777 goto tryagain;
1778 }
1779 }
1780 return error;
1781}
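
/*
 * Example usage: a minimal illustrative sketch of a caller truncating a
 * file to zero length (example_truncate is a hypothetical helper, compiled
 * out).  Setting va_data_size is what makes the RPC above look up a real
 * open/lock/delegation stateid instead of sending the all-zero one.
 */
#if 0
static int
example_truncate(nfsnode_t np, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_data_size, 0);
	return nfs4_setattr_rpc(np, &va, ctx);
}
#endif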
1782#endif /* CONFIG_NFS4 */
1783
1784/*
1785 * Wait for any pending recovery to complete.
1786 */
1787int
1788nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1789{
1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1791 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1792
1793 lck_mtx_lock(&nmp->nm_lock);
1794 while (nmp->nm_state & NFSSTA_RECOVER) {
1795 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1796 break;
1797 }
1798 nfs_mount_sock_thread_wake(nmp);
1799 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1800 slpflag = 0;
1801 }
1802 lck_mtx_unlock(&nmp->nm_lock);
1803
1804 return error;
1805}
1806
1807/*
1808 * We're about to use/manipulate the NFS mount's open/lock state.
1809 * Wait for any pending state recovery to complete, then
1810 * mark the state as being in use (which will hold off
1811 * the recovery thread until we're done).
1812 */
1813int
1814nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1815{
1816 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1817 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1818
1819 if (nfs_mount_gone(nmp)) {
1820 return ENXIO;
1821 }
1822 lck_mtx_lock(&nmp->nm_lock);
1823 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
1824 lck_mtx_unlock(&nmp->nm_lock);
1825 return ENXIO;
1826 }
1827 while (nmp->nm_state & NFSSTA_RECOVER) {
1828 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1829 break;
1830 }
1831 nfs_mount_sock_thread_wake(nmp);
1832 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1833 slpflag = 0;
1834 }
1835 if (!error) {
1836 nmp->nm_stateinuse++;
1837 }
1838 lck_mtx_unlock(&nmp->nm_lock);
1839
1840 return error;
1841}
1842
1843/*
1844 * We're done using/manipulating the NFS mount's open/lock
1845 * state. If the given error indicates that recovery should
1846 * be performed, we'll initiate recovery.
1847 */
1848int
1849nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1850{
1851 int restart = nfs_mount_state_error_should_restart(error);
1852
1853 if (nfs_mount_gone(nmp)) {
1854 return ENXIO;
1855 }
1856 lck_mtx_lock(&nmp->nm_lock);
1857 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1858 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1859 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1860 nfs_need_recover(nmp, error);
1861 }
1862 if (nmp->nm_stateinuse > 0) {
1863 nmp->nm_stateinuse--;
1864 } else {
1865 panic("NFS mount state in use count underrun");
1866 }
1867 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
1868 wakeup(&nmp->nm_stateinuse);
1869 }
1870 lck_mtx_unlock(&nmp->nm_lock);
1871 if (error == NFSERR_GRACE) {
1872 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1873 }
1874
1875 return restart;
1876}
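
/*
 * Example usage: a minimal illustrative sketch of the canonical bracket
 * around a state-dependent operation (do_stateful_rpc is hypothetical).
 * Since nfs_mount_state_in_use_end() returns nonzero when the error
 * warrants recovery, callers typically loop back and redo the operation.
 */
#if 0
restart:
	if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
		return error;
	}
	error = do_stateful_rpc(np, ctx);
	if (nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;
	}
#endif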
1877
1878/*
1879 * Does the error mean we should restart/redo a state-related operation?
1880 */
1881int
1882nfs_mount_state_error_should_restart(int error)
1883{
1884 switch (error) {
1885 case NFSERR_STALE_STATEID:
1886 case NFSERR_STALE_CLIENTID:
1887 case NFSERR_ADMIN_REVOKED:
1888 case NFSERR_EXPIRED:
1889 case NFSERR_OLD_STATEID:
1890 case NFSERR_BAD_STATEID:
1891 case NFSERR_GRACE:
1892 return 1;
1893 }
1894 return 0;
1895}
1896
1897/*
1898 * In some cases we may want to limit how many times we restart a
1899 * state-related operation - e.g. when we're repeatedly getting NFSERR_GRACE.
1900 * Base the limit on the lease (as long as it's not too short).
1901 */
1902uint
1903nfs_mount_state_max_restarts(struct nfsmount *nmp)
1904{
1905 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1906}
1907
1908/*
1909 * Does the error mean we probably lost a delegation?
1910 */
1911int
1912nfs_mount_state_error_delegation_lost(int error)
1913{
1914 switch (error) {
1915 case NFSERR_STALE_STATEID:
1916 case NFSERR_ADMIN_REVOKED:
1917 case NFSERR_EXPIRED:
1918 case NFSERR_OLD_STATEID:
1919 case NFSERR_BAD_STATEID:
1920 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1921 return 1;
1922 }
1923 return 0;
1924}
1925
1926
1927/*
1928 * Mark an NFS node's open state as busy.
1929 */
1930int
1931nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1932{
1933 struct nfsmount *nmp;
1934 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1935 int error = 0, slpflag;
1936
1937 nmp = NFSTONMP(np);
1938 if (nfs_mount_gone(nmp)) {
1939 return ENXIO;
1940 }
1941 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1942
1943 lck_mtx_lock(&np->n_openlock);
1944 while (np->n_openflags & N_OPENBUSY) {
1945 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1946 break;
1947 }
1948 np->n_openflags |= N_OPENWANT;
1949 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1950 slpflag = 0;
1951 }
1952 if (!error) {
1953 np->n_openflags |= N_OPENBUSY;
1954 }
1955 lck_mtx_unlock(&np->n_openlock);
1956
1957 return error;
1958}
1959
1960/*
1961 * Clear an NFS node's open state busy flag and wake up
1962 * anyone wanting it.
1963 */
1964void
1965nfs_open_state_clear_busy(nfsnode_t np)
1966{
1967 int wanted;
1968
1969 lck_mtx_lock(&np->n_openlock);
1970 if (!(np->n_openflags & N_OPENBUSY)) {
1971 panic("nfs_open_state_clear_busy");
1972 }
1973 wanted = (np->n_openflags & N_OPENWANT);
1974 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1975 lck_mtx_unlock(&np->n_openlock);
1976 if (wanted) {
1977 wakeup(&np->n_openflags);
1978 }
1979}
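
/*
 * Example usage: an illustrative sketch of the busy/unbusy bracket that
 * serializes updates to a node's open state (see nfs4_open() below for a
 * real caller).
 */
#if 0
	if ((error = nfs_open_state_set_busy(np, thd))) {
		return error;
	}
	/* ... examine/update np->n_openflags, delegation state, etc. ... */
	nfs_open_state_clear_busy(np);
#endif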
1980
1981/*
1982 * Search a mount's open owner list for the owner for this credential.
1983 * If not found and "alloc" is set, then allocate a new one.
1984 */
1985struct nfs_open_owner *
1986nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1987{
1988 uid_t uid = kauth_cred_getuid(cred);
1989 struct nfs_open_owner *noop, *newnoop = NULL;
1990
1991tryagain:
1992 lck_mtx_lock(&nmp->nm_lock);
1993 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1994 if (kauth_cred_getuid(noop->noo_cred) == uid) {
1995 break;
1996 }
1997 }
1998
1999 if (!noop && !newnoop && alloc) {
2000 lck_mtx_unlock(&nmp->nm_lock);
2001 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
2002 if (!newnoop) {
2003 return NULL;
2004 }
2005 bzero(newnoop, sizeof(*newnoop));
2006 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
2007 newnoop->noo_mount = nmp;
2008 kauth_cred_ref(cred);
2009 newnoop->noo_cred = cred;
2010 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
2011 TAILQ_INIT(&newnoop->noo_opens);
2012 goto tryagain;
2013 }
2014 if (!noop && newnoop) {
2015 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
2016 os_ref_init(&newnoop->noo_refcnt, NULL);
2017 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
2018 noop = newnoop;
2019 }
2020 lck_mtx_unlock(&nmp->nm_lock);
2021
2022 if (newnoop && (noop != newnoop)) {
2023 nfs_open_owner_destroy(newnoop);
2024 }
2025
2026 if (noop) {
2027 nfs_open_owner_ref(noop);
2028 }
2029
2030 return noop;
2031}
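
/*
 * Example usage: an illustrative sketch of the reference discipline.  The
 * lookup returns a referenced owner (allocating one when "alloc" is set),
 * which the caller must drop with nfs_open_owner_rele() when done.
 */
#if 0
	struct nfs_open_owner *noop;

	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}
	/* ... use noop ... */
	nfs_open_owner_rele(noop);
#endif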
2032
2033/*
2034 * destroy an open owner that's no longer needed
2035 */
2036void
2037nfs_open_owner_destroy(struct nfs_open_owner *noop)
2038{
2039 if (noop->noo_cred) {
2040 kauth_cred_unref(&noop->noo_cred);
2041 }
2042 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2043 FREE(noop, M_TEMP);
2044}
2045
2046/*
2047 * acquire a reference count on an open owner
2048 */
2049void
2050nfs_open_owner_ref(struct nfs_open_owner *noop)
2051{
2052 lck_mtx_lock(&noop->noo_lock);
2053 os_ref_retain_locked(&noop->noo_refcnt);
2054 lck_mtx_unlock(&noop->noo_lock);
2055}
2056
2057/*
2058 * drop a reference count on an open owner and destroy it if
2059 * it is no longer referenced and no longer on the mount's list.
2060 */
2061void
2062nfs_open_owner_rele(struct nfs_open_owner *noop)
2063{
2064 os_ref_count_t newcount;
2065
2066 lck_mtx_lock(&noop->noo_lock);
2067 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
2068 panic("nfs_open_owner_rele: no refcnt");
2069 }
2070 newcount = os_ref_release_locked(&noop->noo_refcnt);
2071 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2072 panic("nfs_open_owner_rele: busy");
2073 }
2074 /* XXX we may potentially want to clean up idle/unused open owner structures */
2075 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
2076 lck_mtx_unlock(&noop->noo_lock);
2077 return;
2078 }
2079 /* owner is no longer referenced or linked to mount, so destroy it */
2080 lck_mtx_unlock(&noop->noo_lock);
2081 nfs_open_owner_destroy(noop);
2082}
2083
2084/*
2085 * Mark an open owner as busy because we are about to
2086 * start an operation that uses and updates open owner state.
2087 */
2088int
2089nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2090{
2091 struct nfsmount *nmp;
2092 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2093 int error = 0, slpflag;
2094
2095 nmp = noop->noo_mount;
2096 if (nfs_mount_gone(nmp)) {
2097 return ENXIO;
2098 }
2099 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2100
2101 lck_mtx_lock(&noop->noo_lock);
2102 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
2103 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2104 break;
2105 }
2106 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2107 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
2108 slpflag = 0;
2109 }
2110 if (!error) {
2111 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
2112 }
2113 lck_mtx_unlock(&noop->noo_lock);
2114
2115 return error;
2116}
2117
2118/*
2119 * Clear the busy flag on an open owner and wake up anyone waiting
2120 * to mark it busy.
2121 */
2122void
2123nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2124{
2125 int wanted;
2126
2127 lck_mtx_lock(&noop->noo_lock);
2128 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2129 panic("nfs_open_owner_clear_busy");
2130 }
2131 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2132 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2133 lck_mtx_unlock(&noop->noo_lock);
2134 if (wanted) {
2135 wakeup(noop);
2136 }
2137}
2138
2139/*
2140 * Given an open/lock owner and an error code, increment the
2141 * sequence ID if appropriate.
2142 */
2143void
2144nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2145{
2146 switch (error) {
2147 case NFSERR_STALE_CLIENTID:
2148 case NFSERR_STALE_STATEID:
2149 case NFSERR_OLD_STATEID:
2150 case NFSERR_BAD_STATEID:
2151 case NFSERR_BAD_SEQID:
2152 case NFSERR_BADXDR:
2153 case NFSERR_RESOURCE:
2154 case NFSERR_NOFILEHANDLE:
2155 /* do not increment the open seqid on these errors */
2156 return;
2157 }
2158 if (noop) {
2159 noop->noo_seqid++;
2160 }
2161 if (nlop) {
2162 nlop->nlo_seqid++;
2163 }
2164}
2165
2166/*
2167 * Search a node's open file list for any conflicts with this request.
2168 * Also find this open owner's open file structure.
2169 * If not found and "alloc" is set, then allocate one.
2170 */
2171int
2172nfs_open_file_find(
2173 nfsnode_t np,
2174 struct nfs_open_owner *noop,
2175 struct nfs_open_file **nofpp,
2176 uint32_t accessMode,
2177 uint32_t denyMode,
2178 int alloc)
2179{
2180 *nofpp = NULL;
2181 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2182}
2183
2184/*
2185 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2186 * if an existing one is not found. This is used in "create" scenarios to
2187 * officially add the provisional nofp to the node once the node is created.
2188 */
2189int
2190nfs_open_file_find_internal(
2191 nfsnode_t np,
2192 struct nfs_open_owner *noop,
2193 struct nfs_open_file **nofpp,
2194 uint32_t accessMode,
2195 uint32_t denyMode,
2196 int alloc)
2197{
2198 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2199
2200 if (!np) {
2201 goto alloc;
2202 }
2203tryagain:
2204 lck_mtx_lock(&np->n_openlock);
2205 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2206 if (nofp2->nof_owner == noop) {
2207 nofp = nofp2;
2208 if (!accessMode) {
2209 break;
2210 }
2211 }
2212 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2213 /* This request conflicts with an existing open on this client. */
2214 lck_mtx_unlock(&np->n_openlock);
2215 return EACCES;
2216 }
2217 }
2218
2219 /*
2220 * If this open owner doesn't have an open
2221 * file structure yet, we create one for it.
2222 */
2223 if (!nofp && !*nofpp && !newnofp && alloc) {
2224 lck_mtx_unlock(&np->n_openlock);
2225alloc:
2226 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2227 if (!newnofp) {
2228 return ENOMEM;
2229 }
2230 bzero(newnofp, sizeof(*newnofp));
2231 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2232 newnofp->nof_owner = noop;
2233 nfs_open_owner_ref(noop);
2234 newnofp->nof_np = np;
2235 lck_mtx_lock(&noop->noo_lock);
2236 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2237 lck_mtx_unlock(&noop->noo_lock);
2238 if (np) {
2239 goto tryagain;
2240 }
2241 }
2242 if (!nofp) {
2243 if (*nofpp) {
2244 (*nofpp)->nof_np = np;
2245 nofp = *nofpp;
2246 } else {
2247 nofp = newnofp;
2248 }
2249 if (nofp && np) {
2250 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2251 }
2252 }
2253 if (np) {
2254 lck_mtx_unlock(&np->n_openlock);
2255 }
2256
2257 if (alloc && newnofp && (nofp != newnofp)) {
2258 nfs_open_file_destroy(newnofp);
2259 }
2260
2261 *nofpp = nofp;
2262 return nofp ? 0 : ESRCH;
2263}
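
/*
 * Example usage: an illustrative sketch of the provisional-nofp dance for
 * a create (error handling elided).  The nofp is allocated with no node,
 * then adopted onto the node once the create produces one.
 */
#if 0
	struct nfs_open_file *newnofp = NULL;

	/* allocate a nodeless nofp up front (np == NULL) */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	/* ... the OPEN/CREATE RPC produces the new node np ... */
	/* adopt the provisional nofp onto the new node */
	error = nfs_open_file_find_internal(np, noop, &newnofp, 0, 0, 1);
#endif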
2264
2265/*
2266 * Destroy an open file structure.
2267 */
2268void
2269nfs_open_file_destroy(struct nfs_open_file *nofp)
2270{
2271 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2272 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2273 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2274 nfs_open_owner_rele(nofp->nof_owner);
2275 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2276 FREE(nofp, M_TEMP);
2277}
2278
2279/*
2280 * Mark an open file as busy because we are about to
2281 * start an operation that uses and updates open file state.
2282 */
2283int
2284nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2285{
2286 struct nfsmount *nmp;
2287 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2288 int error = 0, slpflag;
2289
2290 nmp = nofp->nof_owner->noo_mount;
2291 if (nfs_mount_gone(nmp)) {
2292 return ENXIO;
2293 }
2294 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2295
2296 lck_mtx_lock(&nofp->nof_lock);
2297 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2298 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2299 break;
2300 }
2301 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2302 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2303 slpflag = 0;
2304 }
2305 if (!error) {
2306 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2307 }
2308 lck_mtx_unlock(&nofp->nof_lock);
2309
2310 return error;
2311}
2312
2313/*
2314 * Clear the busy flag on an open file and wake up anyone waiting
2315 * to mark it busy.
2316 */
2317void
2318nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2319{
2320 int wanted;
2321
2322 lck_mtx_lock(&nofp->nof_lock);
2323 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2324 panic("nfs_open_file_clear_busy");
2325 }
2326 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2327 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2328 lck_mtx_unlock(&nofp->nof_lock);
2329 if (wanted) {
2330 wakeup(nofp);
2331 }
2332}
2333
2334/*
2335 * Add the open state for the given access/deny modes to this open file.
2336 */
2337void
2338nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2339{
2340 lck_mtx_lock(&nofp->nof_lock);
2341 nofp->nof_access |= accessMode;
2342 nofp->nof_deny |= denyMode;
2343
2344 if (delegated) {
2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2347 nofp->nof_d_r++;
2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2349 nofp->nof_d_w++;
2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2351 nofp->nof_d_rw++;
2352 }
2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2355 nofp->nof_d_r_dw++;
2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2357 nofp->nof_d_w_dw++;
2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2359 nofp->nof_d_rw_dw++;
2360 }
2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2363 nofp->nof_d_r_drw++;
2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2365 nofp->nof_d_w_drw++;
2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2367 nofp->nof_d_rw_drw++;
2368 }
2369 }
2370 } else {
2371 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2372 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2373 nofp->nof_r++;
2374 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2375 nofp->nof_w++;
2376 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2377 nofp->nof_rw++;
2378 }
2379 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2380 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2381 nofp->nof_r_dw++;
2382 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2383 nofp->nof_w_dw++;
2384 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2385 nofp->nof_rw_dw++;
2386 }
2387 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2388 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2389 nofp->nof_r_drw++;
2390 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2391 nofp->nof_w_drw++;
2392 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2393 nofp->nof_rw_drw++;
2394 }
2395 }
2396 }
2397
2398 nofp->nof_opencnt++;
2399 lck_mtx_unlock(&nofp->nof_lock);
2400}
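
/*
 * The ladder above maintains one counter per (delegated, deny, access)
 * combination.  A hypothetical equivalent (not the actual layout of
 * struct nfs_open_file) would be a small 3-D array; deny_idx() and
 * access_idx() are assumed mapping helpers:
 */
#if 0
	/* access: 0=R, 1=W, 2=RW; deny: 0=none, 1=write, 2=both */
	uint32_t nof_counts[2][3][3];

	nof_counts[delegated ? 1 : 0][deny_idx(denyMode)][access_idx(accessMode)]++;
#endif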
2401
2402/*
2403 * Find which particular open combo will be closed and report what
2404 * the new modes will be and whether the open was delegated.
2405 */
2406void
2407nfs_open_file_remove_open_find(
2408 struct nfs_open_file *nofp,
2409 uint32_t accessMode,
2410 uint32_t denyMode,
2411 uint8_t *newAccessMode,
2412 uint8_t *newDenyMode,
2413 int *delegated)
2414{
2415 /*
2416 * Calculate new modes: a mode bit gets removed when the sum of all
2417 * the counts covering that mode is exactly one (this open is its last user)
2418 */
2419 *newAccessMode = nofp->nof_access;
2420 *newDenyMode = nofp->nof_deny;
2421
2422 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2423 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2424 ((nofp->nof_r + nofp->nof_d_r +
2425 nofp->nof_rw + nofp->nof_d_rw +
2426 nofp->nof_r_dw + nofp->nof_d_r_dw +
2427 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2428 nofp->nof_r_drw + nofp->nof_d_r_drw +
2429 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2430 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2431 }
2432 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2433 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2434 ((nofp->nof_w + nofp->nof_d_w +
2435 nofp->nof_rw + nofp->nof_d_rw +
2436 nofp->nof_w_dw + nofp->nof_d_w_dw +
2437 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2438 nofp->nof_w_drw + nofp->nof_d_w_drw +
2439 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2440 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2441 }
2442 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2443 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2444 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2445 nofp->nof_w_drw + nofp->nof_d_w_drw +
2446 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2447 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2448 }
2449 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2450 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2451 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2452 nofp->nof_w_drw + nofp->nof_d_w_drw +
2453 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2454 nofp->nof_r_dw + nofp->nof_d_r_dw +
2455 nofp->nof_w_dw + nofp->nof_d_w_dw +
2456 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2457 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2458 }
2459
2460 /* Find the corresponding open access/deny mode counter. */
2461 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2462 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2463 *delegated = (nofp->nof_d_r != 0);
2464 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2465 *delegated = (nofp->nof_d_w != 0);
2466 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2467 *delegated = (nofp->nof_d_rw != 0);
2468 } else {
2469 *delegated = 0;
2470 }
2471 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2472 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2473 *delegated = (nofp->nof_d_r_dw != 0);
2474 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2475 *delegated = (nofp->nof_d_w_dw != 0);
2476 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2477 *delegated = (nofp->nof_d_rw_dw != 0);
2478 } else {
2479 *delegated = 0;
2480 }
2481 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2482 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2483 *delegated = (nofp->nof_d_r_drw != 0);
2484 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2485 *delegated = (nofp->nof_d_w_drw != 0);
2486 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2487 *delegated = (nofp->nof_d_rw_drw != 0);
2488 } else {
2489 *delegated = 0;
2490 }
2491 }
2492}
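
/*
 * Worked example: with nof_r == 2 and nof_rw == 1, removing one
 * (ACCESS_BOTH, DENY_NONE) open leaves the read total at 3 but the write
 * total at exactly 1, so only NFS_OPEN_SHARE_ACCESS_WRITE is cleared from
 * *newAccessMode; read access survives the close.
 */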
2493
2494/*
2495 * Remove the open state for the given access/deny modes from this open file.
2496 */
2497void
2498nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2499{
2500 uint8_t newAccessMode, newDenyMode;
2501 int delegated = 0;
2502
2503 lck_mtx_lock(&nofp->nof_lock);
2504 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2505
2506 /* Decrement the corresponding open access/deny mode counter. */
2507 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2508 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2509 if (delegated) {
2510 if (nofp->nof_d_r == 0) {
2511 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2512 } else {
2513 nofp->nof_d_r--;
2514 }
2515 } else {
2516 if (nofp->nof_r == 0) {
2517 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2518 } else {
2519 nofp->nof_r--;
2520 }
2521 }
2522 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2523 if (delegated) {
2524 if (nofp->nof_d_w == 0) {
2525 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2526 } else {
2527 nofp->nof_d_w--;
2528 }
2529 } else {
2530 if (nofp->nof_w == 0) {
2531 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2532 } else {
2533 nofp->nof_w--;
2534 }
2535 }
2536 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2537 if (delegated) {
2538 if (nofp->nof_d_rw == 0) {
2539 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2540 } else {
2541 nofp->nof_d_rw--;
2542 }
2543 } else {
2544 if (nofp->nof_rw == 0) {
2545 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2546 } else {
2547 nofp->nof_rw--;
2548 }
2549 }
2550 }
2551 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2552 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2553 if (delegated) {
2554 if (nofp->nof_d_r_dw == 0) {
2555 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2556 } else {
2557 nofp->nof_d_r_dw--;
2558 }
2559 } else {
2560 if (nofp->nof_r_dw == 0) {
2561 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2562 } else {
2563 nofp->nof_r_dw--;
2564 }
2565 }
2566 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2567 if (delegated) {
2568 if (nofp->nof_d_w_dw == 0) {
2569 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2570 } else {
2571 nofp->nof_d_w_dw--;
2572 }
2573 } else {
2574 if (nofp->nof_w_dw == 0) {
2575 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2576 } else {
2577 nofp->nof_w_dw--;
2578 }
2579 }
2580 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2581 if (delegated) {
2582 if (nofp->nof_d_rw_dw == 0) {
2583 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2584 } else {
2585 nofp->nof_d_rw_dw--;
2586 }
2587 } else {
2588 if (nofp->nof_rw_dw == 0) {
2589 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2590 } else {
2591 nofp->nof_rw_dw--;
2592 }
2593 }
2594 }
2595 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2596 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2597 if (delegated) {
2598 if (nofp->nof_d_r_drw == 0) {
2599 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2600 } else {
2601 nofp->nof_d_r_drw--;
2602 }
2603 } else {
2604 if (nofp->nof_r_drw == 0) {
2605 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2606 } else {
2607 nofp->nof_r_drw--;
2608 }
2609 }
2610 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2611 if (delegated) {
2612 if (nofp->nof_d_w_drw == 0) {
2613 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2614 } else {
2615 nofp->nof_d_w_drw--;
2616 }
2617 } else {
2618 if (nofp->nof_w_drw == 0) {
2619 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2620 } else {
2621 nofp->nof_w_drw--;
2622 }
2623 }
2624 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2625 if (delegated) {
2626 if (nofp->nof_d_rw_drw == 0) {
2627 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2628 } else {
2629 nofp->nof_d_rw_drw--;
2630 }
2631 } else {
2632 if (nofp->nof_rw_drw == 0) {
2633 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2634 } else {
2635 nofp->nof_rw_drw--;
2636 }
2637 }
2638 }
2639 }
2640
2641 /* update the modes */
2642 nofp->nof_access = newAccessMode;
2643 nofp->nof_deny = newDenyMode;
2644 nofp->nof_opencnt--;
2645 lck_mtx_unlock(&nofp->nof_lock);
2646}
2647
2648#if CONFIG_NFS4
2649/*
2650 * Get the current (delegation, lock, open, default) stateid for this node.
2651 * If the node has a delegation, use the delegation stateid.
2652 * If the pid holds a lock, use the lock owner's stateid.
2653 * Otherwise use the open file's stateid.
2654 * If there is no open file, use a default stateid of all ones.
2655 */
2656void
2657nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2658{
2659 struct nfsmount *nmp = NFSTONMP(np);
2660 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2661 struct nfs_open_owner *noop = NULL;
2662 struct nfs_open_file *nofp = NULL;
2663 struct nfs_lock_owner *nlop = NULL;
2664 nfs_stateid *s = NULL;
2665
2666 if (np->n_openflags & N_DELEG_MASK) {
2667 s = &np->n_dstateid;
2668 } else {
2669 if (p) {
2670 nlop = nfs_lock_owner_find(np, p, 0);
2671 }
2672 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2673 /* we hold locks, use lock stateid */
2674 s = &nlop->nlo_stateid;
2675 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2676 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2677 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2678 nofp->nof_access) {
2679 /* we (should) have the file open, use open stateid */
2680 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2681 nfs4_reopen(nofp, thd);
2682 }
2683 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2684 s = &nofp->nof_stateid;
2685 }
2686 }
2687 }
2688
2689 if (s) {
2690 sid->seqid = s->seqid;
2691 sid->other[0] = s->other[0];
2692 sid->other[1] = s->other[1];
2693 sid->other[2] = s->other[2];
2694 } else {
2695 /* named attributes may not have a stateid for reads, so don't complain for them */
2696 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
2697 NP(np, "nfs_get_stateid: no stateid");
2698 }
2699 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2700 }
2701 if (nlop) {
2702 nfs_lock_owner_rele(nlop);
2703 }
2704 if (noop) {
2705 nfs_open_owner_rele(noop);
2706 }
2707}
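
/*
 * Example usage: an illustrative sketch of stamping the chosen stateid
 * into an outgoing request, as the READ/WRITE/SETATTR paths do.
 */
#if 0
	nfs_stateid stateid;

	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
#endif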
2708
2709
2710/*
2711 * When we have a delegation, we may be able to perform the OPEN locally.
2712 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2713 */
2714int
2715nfs4_open_delegated(
2716 nfsnode_t np,
2717 struct nfs_open_file *nofp,
2718 uint32_t accessMode,
2719 uint32_t denyMode,
2720 vfs_context_t ctx)
2721{
2722 int error = 0, ismember, readtoo = 0, authorized = 0;
2723 uint32_t action;
2724 struct kauth_acl_eval eval;
2725 kauth_cred_t cred = vfs_context_ucred(ctx);
2726
2727 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2728 /*
2729 * Try to open it for read access too,
2730 * so the buffer cache can read data.
2731 */
2732 readtoo = 1;
2733 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2734 }
2735
2736tryagain:
2737 action = 0;
2738 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
2739 action |= KAUTH_VNODE_READ_DATA;
2740 }
2741 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
2742 action |= KAUTH_VNODE_WRITE_DATA;
2743 }
2744
2745 /* evaluate ACE (if we have one) */
2746 if (np->n_dace.ace_flags) {
2747 eval.ae_requested = action;
2748 eval.ae_acl = &np->n_dace;
2749 eval.ae_count = 1;
2750 eval.ae_options = 0;
2751 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
2752 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2753 }
2754 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2755 if (!error && ismember) {
2756 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2757 }
2758
2759 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2760 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2761 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2762 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2763
2764 error = kauth_acl_evaluate(cred, &eval);
2765
2766 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
2767 authorized = 1;
2768 }
2769 }
2770
2771 if (!authorized) {
2772 /* need to ask the server via ACCESS */
2773 struct vnop_access_args naa;
2774 naa.a_desc = &vnop_access_desc;
2775 naa.a_vp = NFSTOV(np);
2776 naa.a_action = action;
2777 naa.a_context = ctx;
2778 if (!(error = nfs_vnop_access(&naa))) {
2779 authorized = 1;
2780 }
2781 }
2782
2783 if (!authorized) {
2784 if (readtoo) {
2785 /* try again without the extra read access */
2786 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2787 readtoo = 0;
2788 goto tryagain;
2789 }
2790 return error ? error : EACCES;
2791 }
2792
2793 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2794
2795 return 0;
2796}
2797
2798
2799/*
2800 * Open a file with the given access/deny modes.
2801 *
2802 * If we have a delegation, we may be able to handle the open locally.
2803 * Otherwise, we will always send the open RPC even if this open's mode is
2804 * a subset of all the existing opens. This makes sure that we will always
2805 * be able to do a downgrade to any of the open modes.
2806 *
2807 * Note: local conflicts should have already been checked in nfs_open_file_find().
2808 */
2809int
2810nfs4_open(
2811 nfsnode_t np,
2812 struct nfs_open_file *nofp,
2813 uint32_t accessMode,
2814 uint32_t denyMode,
2815 vfs_context_t ctx)
2816{
2817 vnode_t vp = NFSTOV(np);
2818 vnode_t dvp = NULL;
2819 struct componentname cn;
2820 const char *vname = NULL;
2821 uint32_t namelen;
2822 char smallname[128];
2823 char *filename = NULL;
2824 int error = 0, readtoo = 0;
2825
2826 /*
2827 * We can handle the OPEN ourselves if we have a delegation,
2828 * unless it's a read delegation and the open is asking for
2829 * either write access or deny read. We also don't bother to
2830 * use the delegation if it's being returned.
2831 */
2832 if (np->n_openflags & N_DELEG_MASK) {
2833 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2834 return error;
2835 }
2836 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2837 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2838 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2839 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2840 nfs_open_state_clear_busy(np);
2841 return error;
2842 }
2843 nfs_open_state_clear_busy(np);
2844 }
2845
2846 /*
2847 * [sigh] We can't trust VFS to get the parent right for named
2848 * attribute nodes. (It likes to reparent the nodes after we've
2849 * created them.) Luckily we can probably get the right parent
2850 * from the n_parent we have stashed away.
2851 */
2852 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2853 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
2854 dvp = NULL;
2855 }
2856 if (!dvp) {
2857 dvp = vnode_getparent(vp);
2858 }
2859 vname = vnode_getname(vp);
2860 if (!dvp || !vname) {
2861 if (!error) {
2862 error = EIO;
2863 }
2864 goto out;
2865 }
2866 filename = &smallname[0];
2867 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2868 if (namelen >= sizeof(smallname)) {
2869 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
2870 if (!filename) {
2871 error = ENOMEM;
2872 goto out;
2873 }
2874 snprintf(filename, namelen + 1, "%s", vname);
2875 }
2876 bzero(&cn, sizeof(cn));
2877 cn.cn_nameptr = filename;
2878 cn.cn_namelen = namelen;
2879
2880 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2881 /*
2882 * Try to open it for read access too,
2883 * so the buffer cache can read data.
2884 */
2885 readtoo = 1;
2886 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2887 }
2888tryagain:
2889 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2890 if (error) {
2891 if (!nfs_mount_state_error_should_restart(error) &&
2892 (error != EINTR) && (error != ERESTART) && readtoo) {
2893 /* try again without the extra read access */
2894 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2895 readtoo = 0;
2896 goto tryagain;
2897 }
2898 goto out;
2899 }
2900 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2901out:
2902 if (filename && (filename != &smallname[0])) {
2903 FREE(filename, M_TEMP);
2904 }
2905 if (vname) {
2906 vnode_putname(vname);
2907 }
2908 if (dvp != NULLVP) {
2909 vnode_put(dvp);
2910 }
2911 return error;
2912}
2913#endif /* CONFIG_NFS4 */
2914
2915int
2916nfs_vnop_mmap(
2917 struct vnop_mmap_args /* {
2918 * struct vnodeop_desc *a_desc;
2919 * vnode_t a_vp;
2920 * int a_fflags;
2921 * vfs_context_t a_context;
2922 * } */*ap)
2923{
2924 vfs_context_t ctx = ap->a_context;
2925 vnode_t vp = ap->a_vp;
2926 nfsnode_t np = VTONFS(vp);
2927 int error = 0, delegated = 0;
2928 uint8_t accessMode, denyMode;
2929 struct nfsmount *nmp;
2930 struct nfs_open_owner *noop = NULL;
2931 struct nfs_open_file *nofp = NULL;
2932
2933 nmp = VTONMP(vp);
2934 if (nfs_mount_gone(nmp)) {
2935 return ENXIO;
2936 }
2937
2938 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2939 return EINVAL;
2940 }
2941 if (np->n_flag & NREVOKE) {
2942 return EIO;
2943 }
2944
2945 /*
2946 * fflags contains some combination of: PROT_READ, PROT_WRITE
2947 * Since it's not possible to mmap() without having the file open for reading,
2948 * read access is always there (regardless of whether PROT_READ is set).
2949 */
2950 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2951 if (ap->a_fflags & PROT_WRITE) {
2952 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2953 }
2954 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2955
2956 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2957 if (!noop) {
2958 return ENOMEM;
2959 }
2960
2961restart:
2962 error = nfs_mount_state_in_use_start(nmp, NULL);
2963 if (error) {
2964 nfs_open_owner_rele(noop);
2965 return error;
2966 }
2967 if (np->n_flag & NREVOKE) {
2968 error = EIO;
2969 nfs_mount_state_in_use_end(nmp, 0);
2970 nfs_open_owner_rele(noop);
2971 return error;
2972 }
2973
2974 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2975 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2976 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2977 error = EPERM;
2978 }
2979#if CONFIG_NFS4
2980 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2981 error = nfs4_reopen(nofp, NULL);
2982 nofp = NULL;
2983 if (!error) {
2984 nfs_mount_state_in_use_end(nmp, 0);
2985 goto restart;
2986 }
2987 }
2988#endif
2989 if (!error) {
2990 error = nfs_open_file_set_busy(nofp, NULL);
2991 }
2992 if (error) {
2993 nofp = NULL;
2994 goto out;
2995 }
2996
2997 /*
2998 * The open reference for mmap must mirror an existing open because
2999 * we may need to reclaim it after the file is closed.
3000 * So grab another open count matching the accessMode passed in.
3001 * If we already had an mmap open, prefer read/write without deny mode.
3002 * This means we may have to drop the current mmap open first.
3003 *
3004 * N.B. We should have an open for the mmap, because mmap was
3005 * called on an open descriptor, or we've created an open for read
3006 * by reading the first page for execve. However, if we piggybacked
3007 * on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE open,
3008 * that open may have closed.
3009 */
3010
3011 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
3012 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
3013 /* We shouldn't get here. We've already opened the file for execve */
3014 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
3015 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3016 }
3017 /*
3018 * mmappings for execve are read-only. Get out with EPERM if the accessMode is not ACCESS_READ
3019 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3020 */
3021 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
3022 /* not asking for just read access -> fail */
3023 error = EPERM;
3024 goto out;
3025 }
3026 /* we don't have the file open, so open it for read access */
3027 if (nmp->nm_vers < NFS_VER4) {
3028 /* NFS v2/v3 opens are always allowed - so just add it. */
3029 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
3030 error = 0;
3031 }
3032#if CONFIG_NFS4
3033 else {
3034 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
3035 }
3036#endif
3037 if (!error) {
3038 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
3039 }
3040 if (error) {
3041 goto out;
3042 }
3043 }
3044
3045 /* determine deny mode for open */
3046 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3047 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3048 delegated = 1;
3049 if (nofp->nof_d_rw) {
3050 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3051 } else if (nofp->nof_d_rw_dw) {
3052 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3053 } else if (nofp->nof_d_rw_drw) {
3054 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3055 }
3056 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3057 delegated = 0;
3058 if (nofp->nof_rw) {
3059 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3060 } else if (nofp->nof_rw_dw) {
3061 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3062 } else if (nofp->nof_rw_drw) {
3063 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3064 }
3065 } else {
3066 error = EPERM;
3067 }
3068 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3069 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3070 delegated = 1;
3071 if (nofp->nof_d_r) {
3072 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3073 } else if (nofp->nof_d_r_dw) {
3074 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3075 } else if (nofp->nof_d_r_drw) {
3076 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3077 }
3078 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3079 delegated = 0;
3080 if (nofp->nof_r) {
3081 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3082 } else if (nofp->nof_r_dw) {
3083 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3084 } else if (nofp->nof_r_drw) {
3085 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3086 }
3087 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3088 /*
3089 * This clause and the one below co-opt a read/write access
3090 * for a read-only mmapping. We probably got here because an
3091 * existing read/write open for the executable file already exists.
3092 */
3093 delegated = 1;
3094 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3095 if (nofp->nof_d_rw) {
3096 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3097 } else if (nofp->nof_d_rw_dw) {
3098 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3099 } else if (nofp->nof_d_rw_drw) {
3100 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3101 }
3102 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3103 delegated = 0;
3104 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3105 if (nofp->nof_rw) {
3106 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3107 } else if (nofp->nof_rw_dw) {
3108 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3109 } else if (nofp->nof_rw_drw) {
3110 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3111 }
3112 } else {
3113 error = EPERM;
3114 }
3115 }
3116 if (error) { /* mmap mode without proper open mode */
3117 goto out;
3118 }
3119
3120 /*
3121 * If the existing mmap access is more than the new access OR the
3122 * existing access is the same and the existing deny mode is no greater,
3123 * then we'll stick with the existing mmap open mode.
3124 */
3125 if ((nofp->nof_mmap_access > accessMode) ||
3126 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
3127 goto out;
3128 }
3129
3130 /* update mmap open mode */
3131 if (nofp->nof_mmap_access) {
3132 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3133 if (error) {
3134 if (!nfs_mount_state_error_should_restart(error)) {
3135 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3136 }
3137 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3138 goto out;
3139 }
3140 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3141 }
3142
3143 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3144 nofp->nof_mmap_access = accessMode;
3145 nofp->nof_mmap_deny = denyMode;
3146
3147out:
3148 if (nofp) {
3149 nfs_open_file_clear_busy(nofp);
3150 }
3151 if (nfs_mount_state_in_use_end(nmp, error)) {
3152 nofp = NULL;
3153 goto restart;
3154 }
3155 if (noop) {
3156 nfs_open_owner_rele(noop);
3157 }
3158
3159 if (!error) {
3160 int ismapped = 0;
3161 nfs_node_lock_force(np);
3162 if ((np->n_flag & NISMAPPED) == 0) {
3163 np->n_flag |= NISMAPPED;
3164 ismapped = 1;
3165 }
3166 nfs_node_unlock(np);
3167 if (ismapped) {
3168 lck_mtx_lock(&nmp->nm_lock);
3169 nmp->nm_state &= ~NFSSTA_SQUISHY;
3170 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
3171 if (nmp->nm_curdeadtimeout <= 0) {
3172 nmp->nm_deadto_start = 0;
3173 }
3174 nmp->nm_mappers++;
3175 lck_mtx_unlock(&nmp->nm_lock);
3176 }
3177 }
3178
3179 return error;
3180}
3181
3182int
3183nfs_vnop_mmap_check(
3184 struct vnop_mmap_check_args /* {
3185 * struct vnodeop_desc *a_desc;
3186 * vnode_t a_vp;
3187 * int a_flags;
3188 * vfs_context_t a_context;
3189 * } */*ap)
3190{
3191 vfs_context_t ctx = ap->a_context;
3192 vnode_t vp = ap->a_vp;
3193 struct nfsmount *nmp = VTONMP(vp);
3194 struct vnop_access_args naa;
3195 int error = 0;
3196
3197 if (nfs_mount_gone(nmp)) {
3198 return ENXIO;
3199 }
3200
3201 if (vnode_isreg(vp)) {
3202 /*
3203 * We only need to ensure that a page-in will be
3204 * possible with these credentials. Everything
3205 * else has been checked at other layers.
3206 */
3207 naa.a_desc = &vnop_access_desc;
3208 naa.a_vp = vp;
3209 naa.a_action = KAUTH_VNODE_READ_DATA;
3210 naa.a_context = ctx;
3211
3212 /* compute actual success/failure based on accessibility */
3213 error = nfs_vnop_access(&naa);
3214 }
3215
3216 return error;
3217}
3218
3219int
3220nfs_vnop_mnomap(
3221 struct vnop_mnomap_args /* {
3222 * struct vnodeop_desc *a_desc;
3223 * vnode_t a_vp;
3224 * vfs_context_t a_context;
3225 * } */*ap)
3226{
3227 vfs_context_t ctx = ap->a_context;
3228 vnode_t vp = ap->a_vp;
3229 nfsnode_t np = VTONFS(vp);
3230 struct nfsmount *nmp;
3231 struct nfs_open_file *nofp = NULL;
3232 off_t size;
3233 int error;
3234 int is_mapped_flag = 0;
3235
3236 nmp = VTONMP(vp);
3237 if (nfs_mount_gone(nmp)) {
3238 return ENXIO;
3239 }
3240
3241 nfs_node_lock_force(np);
3242 if (np->n_flag & NISMAPPED) {
3243 is_mapped_flag = 1;
3244 np->n_flag &= ~NISMAPPED;
3245 }
3246 nfs_node_unlock(np);
3247 if (is_mapped_flag) {
3248 lck_mtx_lock(&nmp->nm_lock);
3249 if (nmp->nm_mappers) {
3250 nmp->nm_mappers--;
3251 } else {
3252 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3253 }
3254 lck_mtx_unlock(&nmp->nm_lock);
3255 }
3256
3257 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3258 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
3259 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
3260 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
3261 }
3262
3263 /* walk all open files and close all mmap opens */
3264loop:
3265 error = nfs_mount_state_in_use_start(nmp, NULL);
3266 if (error) {
3267 return error;
3268 }
3269 lck_mtx_lock(&np->n_openlock);
3270 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
3271 if (!nofp->nof_mmap_access) {
3272 continue;
3273 }
3274 lck_mtx_unlock(&np->n_openlock);
3275#if CONFIG_NFS4
3276 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3277 error = nfs4_reopen(nofp, NULL);
3278 if (!error) {
3279 nfs_mount_state_in_use_end(nmp, 0);
3280 goto loop;
3281 }
3282 }
3283#endif
3284 if (!error) {
3285 error = nfs_open_file_set_busy(nofp, NULL);
3286 }
3287 if (error) {
3288 lck_mtx_lock(&np->n_openlock);
3289 break;
3290 }
3291 if (nofp->nof_mmap_access) {
3292 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3293 if (!nfs_mount_state_error_should_restart(error)) {
3294 if (error) { /* not a state-operation-restarting error, so just clear the access */
3295 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3296 }
3297 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3298 }
3299 if (error) {
3300 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3301 }
3302 }
3303 nfs_open_file_clear_busy(nofp);
3304 nfs_mount_state_in_use_end(nmp, error);
3305 goto loop;
3306 }
3307 lck_mtx_unlock(&np->n_openlock);
3308 nfs_mount_state_in_use_end(nmp, error);
3309 return error;
3310}
3311
3312/*
3313 * Search a node's lock owner list for the owner for this process.
3314 * If not found and "alloc" is set, then allocate a new one.
3315 */
3316struct nfs_lock_owner *
3317nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3318{
3319 pid_t pid = proc_pid(p);
3320 struct nfs_lock_owner *nlop, *newnlop = NULL;
3321
3322tryagain:
3323 lck_mtx_lock(&np->n_openlock);
3324 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3325 os_ref_count_t newcount;
3326
3327 if (nlop->nlo_pid != pid) {
3328 continue;
3329 }
3330 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
3331 break;
3332 }
3333 /* stale lock owner... reuse it if we can */
3334 if (os_ref_get_count(&nlop->nlo_refcnt)) {
3335 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3336 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3337 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3338 lck_mtx_unlock(&np->n_openlock);
3339 goto tryagain;
3340 }
3341 nlop->nlo_pid_start = p->p_start;
3342 nlop->nlo_seqid = 0;
3343 nlop->nlo_stategenid = 0;
3344 break;
3345 }
3346
3347 if (!nlop && !newnlop && alloc) {
3348 lck_mtx_unlock(&np->n_openlock);
3349 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3350 if (!newnlop) {
3351 return NULL;
3352 }
3353 bzero(newnlop, sizeof(*newnlop));
3354 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3355 newnlop->nlo_pid = pid;
3356 newnlop->nlo_pid_start = p->p_start;
3357 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3358 TAILQ_INIT(&newnlop->nlo_locks);
3359 goto tryagain;
3360 }
3361 if (!nlop && newnlop) {
3362 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3363 os_ref_init(&newnlop->nlo_refcnt, NULL);
3364 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3365 nlop = newnlop;
3366 }
3367 lck_mtx_unlock(&np->n_openlock);
3368
3369 if (newnlop && (nlop != newnlop)) {
3370 nfs_lock_owner_destroy(newnlop);
3371 }
3372
3373 if (nlop) {
3374 nfs_lock_owner_ref(nlop);
3375 }
3376
3377 return nlop;
3378}
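
/*
 * Worked example: if a process exits and its pid is later reused, the new
 * process matches nlo_pid but not nlo_pid_start.  The stale owner is
 * unlinked if it still holds references, or recycled in place with a fresh
 * seqid/stategenid, so the new process never inherits old lock state.
 */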
3379
3380/*
3381 * destroy a lock owner that's no longer needed
3382 */
3383void
3384nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3385{
3386 if (nlop->nlo_open_owner) {
3387 nfs_open_owner_rele(nlop->nlo_open_owner);
3388 nlop->nlo_open_owner = NULL;
3389 }
3390 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3391 FREE(nlop, M_TEMP);
3392}
3393
3394/*
3395 * acquire a reference count on a lock owner
3396 */
3397void
3398nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3399{
3400 lck_mtx_lock(&nlop->nlo_lock);
3401 os_ref_retain_locked(&nlop->nlo_refcnt);
3402 lck_mtx_unlock(&nlop->nlo_lock);
3403}
3404
3405/*
3406 * drop a reference count on a lock owner and destroy it if
3407 * it is no longer referenced and no longer on the mount's list.
3408 */
3409void
3410nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3411{
3412 os_ref_count_t newcount;
3413
3414 lck_mtx_lock(&nlop->nlo_lock);
3415 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
3416 panic("nfs_lock_owner_rele: no refcnt");
3417 }
3418 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3419 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3420 panic("nfs_lock_owner_rele: busy");
3421 }
3422 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3423 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3424 lck_mtx_unlock(&nlop->nlo_lock);
3425 return;
3426 }
3427 /* owner is no longer referenced or linked to mount, so destroy it */
3428 lck_mtx_unlock(&nlop->nlo_lock);
3429 nfs_lock_owner_destroy(nlop);
3430}
3431
3432/*
3433 * Mark a lock owner as busy because we are about to
3434 * start an operation that uses and updates lock owner state.
3435 */
3436int
3437nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3438{
3439 struct nfsmount *nmp;
3440 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3441 int error = 0, slpflag;
3442
3443 nmp = nlop->nlo_open_owner->noo_mount;
3444 if (nfs_mount_gone(nmp)) {
3445 return ENXIO;
3446 }
3447 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3448
3449 lck_mtx_lock(&nlop->nlo_lock);
3450 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3451 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
3452 break;
3453 }
3454 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3455 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3456 slpflag = 0;
3457 }
3458 if (!error) {
3459 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3460 }
3461 lck_mtx_unlock(&nlop->nlo_lock);
3462
3463 return error;
3464}
3465
3466/*
3467 * Clear the busy flag on a lock owner and wake up anyone waiting
3468 * to mark it busy.
3469 */
3470void
3471nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3472{
3473 int wanted;
3474
3475 lck_mtx_lock(&nlop->nlo_lock);
3476 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3477 panic("nfs_lock_owner_clear_busy");
3478 }
3479 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3480 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3481 lck_mtx_unlock(&nlop->nlo_lock);
3482 if (wanted) {
3483 wakeup(nlop);
3484 }
3485}
3486
3487/*
3488 * Insert a held lock into a lock owner's sorted list.
3489 * (flock locks are always inserted at the head of the list)
3490 */
3491void
3492nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3493{
3494 struct nfs_file_lock *nflp;
3495
3496 /* insert new lock in lock owner's held lock list */
3497 lck_mtx_lock(&nlop->nlo_lock);
3498 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3499 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3500 } else {
3501 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3502 if (newnflp->nfl_start < nflp->nfl_start) {
3503 break;
3504 }
3505 }
3506 if (nflp) {
3507 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3508 } else {
3509 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3510 }
3511 }
3512 lck_mtx_unlock(&nlop->nlo_lock);
3513}
3514
3515/*
3516 * Get a file lock structure for this lock owner.
3517 */
3518struct nfs_file_lock *
3519nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3520{
3521 struct nfs_file_lock *nflp = NULL;
3522
3523 lck_mtx_lock(&nlop->nlo_lock);
3524 if (!nlop->nlo_alock.nfl_owner) {
3525 nflp = &nlop->nlo_alock;
3526 nflp->nfl_owner = nlop;
3527 }
3528 lck_mtx_unlock(&nlop->nlo_lock);
3529 if (!nflp) {
3530 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3531 if (!nflp) {
3532 return NULL;
3533 }
3534 bzero(nflp, sizeof(*nflp));
3535 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3536 nflp->nfl_owner = nlop;
3537 }
3538 nfs_lock_owner_ref(nlop);
3539 return nflp;
3540}
3541
3542/*
3543 * destroy the given NFS file lock structure
3544 */
3545void
3546nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3547{
3548 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3549
3550 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3551 nflp->nfl_owner = NULL;
3552 FREE(nflp, M_TEMP);
3553 } else {
3554 lck_mtx_lock(&nlop->nlo_lock);
3555 bzero(nflp, sizeof(*nflp));
3556 lck_mtx_unlock(&nlop->nlo_lock);
3557 }
3558 nfs_lock_owner_rele(nlop);
3559}
3560
3561/*
3562 * Check if one file lock conflicts with another.
3563 * (nflp1 is the new lock. nflp2 is the existing lock.)
3564 */
3565int
3566nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3567{
3568 /* no conflict if either lock is dead */
3569 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3570 return 0;
3571 }
3572 /* no conflict if it's ours - unless the lock style doesn't match */
3573 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3574 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3575 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3576 (nflp1->nfl_start > nflp2->nfl_start) &&
3577 (nflp1->nfl_end < nflp2->nfl_end)) {
3578 *willsplit = 1;
3579 }
3580 return 0;
3581 }
3582 /* no conflict if ranges don't overlap */
3583 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3584 return 0;
3585 }
3586 /* no conflict if neither lock is exclusive */
3587 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3588 return 0;
3589 }
3590 /* conflict */
3591 return 1;
3592}
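/*
 * Worked example (hypothetical ranges): a new F_WRLCK over bytes 50-149
 * conflicts with another owner's F_RDLCK over 0-99 - the ranges overlap
 * and one side is exclusive.  If instead both locks belong to the same
 * owner and style, with the existing lock F_RDLCK over 0-199 and the new
 * one F_WRLCK over 50-149, no conflict is reported, but *willsplit is
 * set (when requested) because granting the new lock will cut the old
 * one into 0-49 and 150-199.
 */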
3593
3594#if CONFIG_NFS4
3595/*
3596 * Send an NFSv4 LOCK RPC to the server.
3597 */
3598int
3599nfs4_setlock_rpc(
3600 nfsnode_t np,
3601 struct nfs_open_file *nofp,
3602 struct nfs_file_lock *nflp,
3603 int reclaim,
3604 int flags,
3605 thread_t thd,
3606 kauth_cred_t cred)
3607{
3608 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3609 struct nfsmount *nmp;
3610 struct nfsm_chain nmreq, nmrep;
3611 uint64_t xid;
3612 uint32_t locktype;
3613 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3614 struct nfsreq_secinfo_args si;
3615
3616 nmp = NFSTONMP(np);
3617 if (nfs_mount_gone(nmp)) {
3618 return ENXIO;
3619 }
3620 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3621 return EINVAL;
3622 }
3623
3624 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3625 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3626 ((nflp->nfl_type == F_WRLCK) ?
3627 NFS_LOCK_TYPE_WRITEW :
3628 NFS_LOCK_TYPE_READW) :
3629 ((nflp->nfl_type == F_WRLCK) ?
3630 NFS_LOCK_TYPE_WRITE :
3631 NFS_LOCK_TYPE_READ);
3632 if (newlocker) {
3633 error = nfs_open_file_set_busy(nofp, thd);
3634 if (error) {
3635 return error;
3636 }
3637 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3638 if (error) {
3639 nfs_open_file_clear_busy(nofp);
3640 return error;
3641 }
3642 if (!nlop->nlo_open_owner) {
3643 nfs_open_owner_ref(nofp->nof_owner);
3644 nlop->nlo_open_owner = nofp->nof_owner;
3645 }
3646 }
3647 error = nfs_lock_owner_set_busy(nlop, thd);
3648 if (error) {
3649 if (newlocker) {
3650 nfs_open_owner_clear_busy(nofp->nof_owner);
3651 nfs_open_file_clear_busy(nofp);
3652 }
3653 return error;
3654 }
3655
3656 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3657 nfsm_chain_null(&nmreq);
3658 nfsm_chain_null(&nmrep);
3659
3660 // PUTFH, GETATTR, LOCK
3661 numops = 3;
3662 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3663 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3664 numops--;
3665 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3666 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3667 numops--;
3668 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3669 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3670 numops--;
3671 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3672 nfsm_chain_add_32(error, &nmreq, locktype);
3673 nfsm_chain_add_32(error, &nmreq, reclaim);
3674 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3675 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3676 nfsm_chain_add_32(error, &nmreq, newlocker);
3677 if (newlocker) {
3678 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3679 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3680 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3681 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3682 } else {
3683 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3684 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3685 }
3686 nfsm_chain_build_done(error, &nmreq);
3687 nfsm_assert(error, (numops == 0), EPROTO);
3688 nfsmout_if(error);
3689
3690 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3691
3692 if ((lockerror = nfs_node_lock(np))) {
3693 error = lockerror;
3694 }
3695 nfsm_chain_skip_tag(error, &nmrep);
3696 nfsm_chain_get_32(error, &nmrep, numops);
3697 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3698 nfsmout_if(error);
3699 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3700 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3701 nfsmout_if(error);
3702 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3703 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3704 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3705
3706 /* Update the lock owner's stategenid once it appears the server has state for it. */
3707 /* We determine this by noting the request was successful (we got a stateid). */
3708 if (newlocker && !error) {
3709 nlop->nlo_stategenid = nmp->nm_stategenid;
3710 }
3711nfsmout:
3712 if (!lockerror) {
3713 nfs_node_unlock(np);
3714 }
3715 nfs_lock_owner_clear_busy(nlop);
3716 if (newlocker) {
3717 nfs_open_owner_clear_busy(nofp->nof_owner);
3718 nfs_open_file_clear_busy(nofp);
3719 }
3720 nfsm_chain_cleanup(&nmreq);
3721 nfsm_chain_cleanup(&nmrep);
3722 return error;
3723}
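/*
 * For reference: the LOCK arguments marshalled above follow the locker4
 * union of RFC 7530 - when new_lock_owner is TRUE the request carries
 * (open_seqid, open_stateid, lock_seqid, lock_owner4) so the server can
 * derive lock state from the open state; otherwise it carries just
 * (lock_stateid, lock_seqid) for the already-known lock owner.
 */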
3724
3725/*
3726 * Send an NFSv4 LOCKU RPC to the server.
3727 */
3728int
3729nfs4_unlock_rpc(
3730 nfsnode_t np,
3731 struct nfs_lock_owner *nlop,
3732 int type,
3733 uint64_t start,
3734 uint64_t end,
3735 int flags,
3736 thread_t thd,
3737 kauth_cred_t cred)
3738{
3739 struct nfsmount *nmp;
3740 struct nfsm_chain nmreq, nmrep;
3741 uint64_t xid;
3742 int error = 0, lockerror = ENOENT, numops, status;
3743 struct nfsreq_secinfo_args si;
3744
3745 nmp = NFSTONMP(np);
3746 if (nfs_mount_gone(nmp)) {
3747 return ENXIO;
3748 }
3749 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3750 return EINVAL;
3751 }
3752
3753 error = nfs_lock_owner_set_busy(nlop, NULL);
3754 if (error) {
3755 return error;
3756 }
3757
3758 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3759 nfsm_chain_null(&nmreq);
3760 nfsm_chain_null(&nmrep);
3761
3762 // PUTFH, GETATTR, LOCKU
3763 numops = 3;
3764 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3765 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3766 numops--;
3767 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3768 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3769 numops--;
3770 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3771 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3772 numops--;
3773 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3774 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3775 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3776 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3777 nfsm_chain_add_64(error, &nmreq, start);
3778 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3779 nfsm_chain_build_done(error, &nmreq);
3780 nfsm_assert(error, (numops == 0), EPROTO);
3781 nfsmout_if(error);
3782
3783 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3784
3785 if ((lockerror = nfs_node_lock(np))) {
3786 error = lockerror;
3787 }
3788 nfsm_chain_skip_tag(error, &nmrep);
3789 nfsm_chain_get_32(error, &nmrep, numops);
3790 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3791 nfsmout_if(error);
3792 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3793 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3794 nfsmout_if(error);
3795 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3796 nfs_owner_seqid_increment(NULL, nlop, error);
3797 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3798nfsmout:
3799 if (!lockerror) {
3800 nfs_node_unlock(np);
3801 }
3802 nfs_lock_owner_clear_busy(nlop);
3803 nfsm_chain_cleanup(&nmreq);
3804 nfsm_chain_cleanup(&nmrep);
3805 return error;
3806}
3807
3808/*
3809 * Send an NFSv4 LOCKT RPC to the server.
3810 */
3811int
3812nfs4_getlock_rpc(
3813 nfsnode_t np,
3814 struct nfs_lock_owner *nlop,
3815 struct flock *fl,
3816 uint64_t start,
3817 uint64_t end,
3818 vfs_context_t ctx)
3819{
3820 struct nfsmount *nmp;
3821 struct nfsm_chain nmreq, nmrep;
3822 uint64_t xid, val64 = 0;
3823 uint32_t val = 0;
3824 int error = 0, lockerror, numops, status;
3825 struct nfsreq_secinfo_args si;
3826
3827 nmp = NFSTONMP(np);
3828 if (nfs_mount_gone(nmp)) {
3829 return ENXIO;
3830 }
3831 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3832 return EINVAL;
3833 }
3834
3835 lockerror = ENOENT;
3836 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3837 nfsm_chain_null(&nmreq);
3838 nfsm_chain_null(&nmrep);
3839
3840 // PUTFH, GETATTR, LOCKT
3841 numops = 3;
3842 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3843 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3844 numops--;
3845 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3846 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3847 numops--;
3848 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3849 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3850 numops--;
3851 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3852 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3853 nfsm_chain_add_64(error, &nmreq, start);
3854 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3855 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3856 nfsm_chain_build_done(error, &nmreq);
3857 nfsm_assert(error, (numops == 0), EPROTO);
3858 nfsmout_if(error);
3859
3860 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3861
3862 if ((lockerror = nfs_node_lock(np))) {
3863 error = lockerror;
3864 }
3865 nfsm_chain_skip_tag(error, &nmrep);
3866 nfsm_chain_get_32(error, &nmrep, numops);
3867 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3868 nfsmout_if(error);
3869 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3870 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3871 nfsmout_if(error);
3872 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3873 if (error == NFSERR_DENIED) {
3874 error = 0;
3875 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3876 nfsm_chain_get_64(error, &nmrep, val64);
3877 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3878 nfsm_chain_get_32(error, &nmrep, val);
3879 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3880 fl->l_pid = 0;
3881 fl->l_whence = SEEK_SET;
3882 } else if (!error) {
3883 fl->l_type = F_UNLCK;
3884 }
3885nfsmout:
3886 if (!lockerror) {
3887 nfs_node_unlock(np);
3888 }
3889 nfsm_chain_cleanup(&nmreq);
3890 nfsm_chain_cleanup(&nmrep);
3891 return error;
3892}
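/*
 * Note: LOCKT carries only a lock_owner4, no stateid, so it can be sent
 * without establishing any lock state on the server.  On NFSERR_DENIED
 * the reply describes the conflicting lock as (offset, length, locktype),
 * which the code above maps back into the flock structure; a length of
 * UINT64_MAX means "to end of file", which POSIX expresses as l_len == 0.
 */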
3893#endif /* CONFIG_NFS4 */
3894
3895/*
3896 * Check for any conflicts with the given lock.
3897 *
3898 * Checking for a lock doesn't require the file to be opened.
3899 * So we skip all the open owner, open file, lock owner work
3900 * and just check for a conflicting lock.
3901 */
3902int
3903nfs_advlock_getlock(
3904 nfsnode_t np,
3905 struct nfs_lock_owner *nlop,
3906 struct flock *fl,
3907 uint64_t start,
3908 uint64_t end,
3909 vfs_context_t ctx)
3910{
3911 struct nfsmount *nmp;
3912 struct nfs_file_lock *nflp;
3913 int error = 0, answered = 0;
3914
3915 nmp = NFSTONMP(np);
3916 if (nfs_mount_gone(nmp)) {
3917 return ENXIO;
3918 }
3919
3920restart:
3921 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3922 return error;
3923 }
3924
3925 lck_mtx_lock(&np->n_openlock);
3926 /* scan currently held locks for conflict */
3927 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3928 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
3929 continue;
3930 }
3931 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3932 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
3933 break;
3934 }
3935 }
3936 if (nflp) {
3937 /* found a conflicting lock */
3938 fl->l_type = nflp->nfl_type;
3939 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3940 fl->l_start = nflp->nfl_start;
3941 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3942 fl->l_whence = SEEK_SET;
3943 answered = 1;
3944 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3945 /*
3946 * If we have a write delegation, we know there can't be other
3947 * locks on the server. So the answer is no conflicting lock found.
3948 */
3949 fl->l_type = F_UNLCK;
3950 answered = 1;
3951 }
3952 lck_mtx_unlock(&np->n_openlock);
3953 if (answered) {
3954 nfs_mount_state_in_use_end(nmp, 0);
3955 return 0;
3956 }
3957
3958 /* no conflict found locally, so ask the server */
3959 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3960
3961 if (nfs_mount_state_in_use_end(nmp, error)) {
3962 goto restart;
3963 }
3964 return error;
3965}
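/*
 * From user space this path is reached via fcntl(2), e.g. (illustrative):
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	fcntl(fd, F_GETLK, &fl);
 *
 * If a conflicting lock is found, fl is rewritten to describe it;
 * otherwise fl.l_type comes back as F_UNLCK.  (See nfs_vnop_advlock()
 * below for how fl is converted into the [start, end] range used here.)
 */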
3966
3967/*
3968 * Acquire a file lock for the given range.
3969 *
3970 * Add the lock (request) to the lock queue.
3971 * Scan the lock queue for any conflicting locks.
3972 * If a conflict is found, block or return an error.
3973 * Once end of queue is reached, send request to the server.
3974 * If the server grants the lock, scan the lock queue and
3975 * update any existing locks. Then (optionally) scan the
3976 * queue again to coalesce any locks adjacent to the new one.
3977 */
3978int
3979nfs_advlock_setlock(
3980 nfsnode_t np,
3981 struct nfs_open_file *nofp,
3982 struct nfs_lock_owner *nlop,
3983 int op,
3984 uint64_t start,
3985 uint64_t end,
3986 int style,
3987 short type,
3988 vfs_context_t ctx)
3989{
3990 struct nfsmount *nmp;
3991 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3992 struct nfs_file_lock *coalnflp;
3993 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3994 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
3995
3996 nmp = NFSTONMP(np);
3997 if (nfs_mount_gone(nmp)) {
3998 return ENXIO;
3999 }
4000 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4001
4002 if ((type != F_RDLCK) && (type != F_WRLCK)) {
4003 return EINVAL;
4004 }
4005
4006 /* allocate a new lock */
4007 newnflp = nfs_file_lock_alloc(nlop);
4008 if (!newnflp) {
4009 return ENOLCK;
4010 }
4011 newnflp->nfl_start = start;
4012 newnflp->nfl_end = end;
4013 newnflp->nfl_type = type;
4014 if (op == F_SETLKW) {
4015 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
4016 }
4017 newnflp->nfl_flags |= style;
4018 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
4019
4020 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
4021 /*
4022 * For exclusive flock-style locks, if we block waiting for the
4023 * lock, we need to first release any currently held shared
4024 * flock-style lock. So, the first thing we do is check if we
4025 * have a shared flock-style lock.
4026 */
4027 nflp = TAILQ_FIRST(&nlop->nlo_locks);
4028 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
4029 nflp = NULL;
4030 }
4031 if (nflp && (nflp->nfl_type != F_RDLCK)) {
4032 nflp = NULL;
4033 }
4034 flocknflp = nflp;
4035 }
4036
4037restart:
4038 restart = 0;
4039 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4040 if (error) {
4041 goto error_out;
4042 }
4043 inuse = 1;
4044 if (np->n_flag & NREVOKE) {
4045 error = EIO;
4046 nfs_mount_state_in_use_end(nmp, 0);
4047 inuse = 0;
4048 goto error_out;
4049 }
4050#if CONFIG_NFS4
4051 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4052 nfs_mount_state_in_use_end(nmp, 0);
4053 inuse = 0;
4054 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
4055 if (error) {
4056 goto error_out;
4057 }
4058 goto restart;
4059 }
4060#endif
4061
4062 lck_mtx_lock(&np->n_openlock);
4063 if (!inqueue) {
4064 /* insert new lock at beginning of list */
4065 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
4066 inqueue = 1;
4067 }
4068
4069 /* scan current list of locks (held and pending) for conflicts */
4070 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
4071 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4072 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
4073 continue;
4074 }
4075 /* Conflict */
4076 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4077 error = EAGAIN;
4078 break;
4079 }
4080 /* Block until this lock is no longer held. */
4081 if (nflp->nfl_blockcnt == UINT_MAX) {
4082 error = ENOLCK;
4083 break;
4084 }
4085 nflp->nfl_blockcnt++;
4086 do {
4087 if (flocknflp) {
4088 /* release any currently held shared lock before sleeping */
4089 lck_mtx_unlock(&np->n_openlock);
4090 nfs_mount_state_in_use_end(nmp, 0);
4091 inuse = 0;
4092 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4093 flocknflp = NULL;
4094 if (!error) {
4095 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4096 }
4097 if (error) {
4098 lck_mtx_lock(&np->n_openlock);
4099 break;
4100 }
4101 inuse = 1;
4102 lck_mtx_lock(&np->n_openlock);
4103 /* no need to block/sleep if the conflict is gone */
4104 if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
4105 break;
4106 }
4107 }
4108 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
4109 slpflag = 0;
4110 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4111 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4112 /* looks like we have a recovery pending... restart */
4113 restart = 1;
4114 lck_mtx_unlock(&np->n_openlock);
4115 nfs_mount_state_in_use_end(nmp, 0);
4116 inuse = 0;
4117 lck_mtx_lock(&np->n_openlock);
4118 break;
4119 }
4120 if (!error && (np->n_flag & NREVOKE)) {
4121 error = EIO;
4122 }
4123 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
4124 nflp->nfl_blockcnt--;
4125 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
4126 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4127 nfs_file_lock_destroy(nflp);
4128 }
4129 if (error || restart) {
4130 break;
4131 }
4132 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
4133 /* So, start this lock-scanning loop over from where it started. */
4134 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
4135 }
4136 lck_mtx_unlock(&np->n_openlock);
4137 if (restart) {
4138 goto restart;
4139 }
4140 if (error) {
4141 goto error_out;
4142 }
4143
4144 if (willsplit) {
4145 /*
4146 * It looks like this operation is splitting a lock.
4147 * We allocate a new lock now so we don't have to worry
4148 * about the allocation failing after we've updated some state.
4149 */
4150 nflp2 = nfs_file_lock_alloc(nlop);
4151 if (!nflp2) {
4152 error = ENOLCK;
4153 goto error_out;
4154 }
4155 }
4156
4157 /* once the scan for local conflicts comes up clean, send the request to the server */
4158 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
4159 goto error_out;
4160 }
4161 busy = 1;
4162 delay = 0;
4163 do {
4164#if CONFIG_NFS4
4165 /* do we have a delegation? (that we're not returning?) */
4166 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
4167 if (np->n_openflags & N_DELEG_WRITE) {
4168 /* with a write delegation, just take the lock delegated */
4169 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
4170 error = 0;
4171 /* make sure the lock owner knows its open owner */
4172 if (!nlop->nlo_open_owner) {
4173 nfs_open_owner_ref(nofp->nof_owner);
4174 nlop->nlo_open_owner = nofp->nof_owner;
4175 }
4176 break;
4177 } else {
4178 /*
4179 * If we don't have any non-delegated opens but we do have
4180 * delegated opens, then we need to first claim the delegated
4181 * opens so that the lock request on the server can be associated
4182 * with an open it knows about.
4183 */
4184 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
4185 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
4186 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
4187 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
4188 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
4189 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
4190 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
4191 if (error) {
4192 break;
4193 }
4194 }
4195 }
4196 }
4197#endif
4198 if (np->n_flag & NREVOKE) {
4199 error = EIO;
4200 }
4201 if (!error) {
4202 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4203 }
4204 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
4205 break;
4206 }
4207 /* request was denied due to either conflict or grace period */
4208 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4209 error = EAGAIN;
4210 break;
4211 }
4212 if (flocknflp) {
4213 /* release any currently held shared lock before sleeping */
4214 nfs_open_state_clear_busy(np);
4215 busy = 0;
4216 if (inuse) {
4217 nfs_mount_state_in_use_end(nmp, 0);
4218 inuse = 0;
4219 }
4220 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4221 flocknflp = NULL;
4222 if (!error2) {
4223 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4224 }
4225 if (!error2) {
4226 inuse = 1;
4227 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
4228 }
4229 if (error2) {
4230 error = error2;
4231 break;
4232 }
4233 busy = 1;
4234 }
4235 /*
4236 * Wait a little bit and send the request again.
4237 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
4238 */
4239 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
4240 if (error == NFSERR_GRACE) {
4241 delay = 4;
4242 }
4243 if (delay < 4) {
4244 delay++;
4245 }
4246 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
4247 slpflag = 0;
4248 }
4249 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4250 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4251 /* looks like we have a recovery pending... restart */
4252 nfs_open_state_clear_busy(np);
4253 busy = 0;
4254 if (inuse) {
4255 nfs_mount_state_in_use_end(nmp, 0);
4256 inuse = 0;
4257 }
4258 goto restart;
4259 }
4260 if (!error && (np->n_flag & NREVOKE)) {
4261 error = EIO;
4262 }
4263 } while (!error);
4264
4265error_out:
4266 if (nfs_mount_state_error_should_restart(error)) {
4267 /* looks like we need to restart this operation */
4268 if (busy) {
4269 nfs_open_state_clear_busy(np);
4270 busy = 0;
4271 }
4272 if (inuse) {
4273 nfs_mount_state_in_use_end(nmp, error);
4274 inuse = 0;
4275 }
4276 goto restart;
4277 }
4278 lck_mtx_lock(&np->n_openlock);
4279 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
4280 if (error) {
4281 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4282 if (newnflp->nfl_blockcnt) {
4283 /* wake up anyone blocked on this lock */
4284 wakeup(newnflp);
4285 } else {
4286 /* remove newnflp from lock list and destroy */
4287 if (inqueue) {
4288 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
4289 }
4290 nfs_file_lock_destroy(newnflp);
4291 }
4292 lck_mtx_unlock(&np->n_openlock);
4293 if (busy) {
4294 nfs_open_state_clear_busy(np);
4295 }
4296 if (inuse) {
4297 nfs_mount_state_in_use_end(nmp, error);
4298 }
4299 if (nflp2) {
4300 nfs_file_lock_destroy(nflp2);
4301 }
4302 return error;
4303 }
4304
4305 /* server granted the lock */
4306
4307 /*
4308 * Scan for locks to update.
4309 *
4310 * Locks completely covered are killed.
4311 * At most two locks may need to be clipped.
4312 * It's possible that a single lock may need to be split.
4313 */
4314 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4315 if (nflp == newnflp) {
4316 continue;
4317 }
4318 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4319 continue;
4320 }
4321 if (nflp->nfl_owner != nlop) {
4322 continue;
4323 }
4324 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
4325 continue;
4326 }
4327 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
4328 continue;
4329 }
4330 /* here's one to update */
4331 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
4332 /* The entire lock is being replaced. */
4333 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4334 lck_mtx_lock(&nlop->nlo_lock);
4335 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4336 lck_mtx_unlock(&nlop->nlo_lock);
4337 /* lock will be destroyed below, if no waiters */
4338 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
4339 /* We're replacing a range in the middle of a lock. */
4340 /* The current lock will be split into two locks. */
4341 /* Update locks and insert new lock after current lock. */
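/* e.g. (hypothetical): holding 0-199 and locking 50-149 with a new */
/* type leaves this lock as 0-49, and nflp2 becomes the 150-199 tail. */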
4342 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4343 nflp2->nfl_type = nflp->nfl_type;
4344 nflp2->nfl_start = newnflp->nfl_end + 1;
4345 nflp2->nfl_end = nflp->nfl_end;
4346 nflp->nfl_end = newnflp->nfl_start - 1;
4347 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
4348 nfs_lock_owner_insert_held_lock(nlop, nflp2);
4349 nextnflp = nflp2;
4350 nflp2 = NULL;
4351 } else if (newnflp->nfl_start > nflp->nfl_start) {
4352 /* We're replacing the end of a lock. */
4353 nflp->nfl_end = newnflp->nfl_start - 1;
4354 } else if (newnflp->nfl_end < nflp->nfl_end) {
4355 /* We're replacing the start of a lock. */
4356 nflp->nfl_start = newnflp->nfl_end + 1;
4357 }
4358 if (nflp->nfl_blockcnt) {
4359 /* wake up anyone blocked on this lock */
4360 wakeup(nflp);
4361 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4362 /* remove nflp from lock list and destroy */
4363 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4364 nfs_file_lock_destroy(nflp);
4365 }
4366 }
4367
4368 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4369
4370 /*
4371 * POSIX locks should be coalesced when possible.
4372 */
4373 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
4374 /*
4375 * Walk through the lock queue and check each of our held locks with
4376 * the previous and next locks in the lock owner's "held lock list".
4377 * If the two locks can be coalesced, we merge the current lock into
4378 * the other (previous or next) lock. Merging this way makes sure that
4379 * lock ranges are always merged forward in the lock queue. This is
4380 * important because anyone blocked on the lock being "merged away"
4381 * will still need to block on that range and it will simply continue
4382 * checking locks that are further down the list.
4383 */
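/* e.g. same-type POSIX locks over 0-49 and 50-99 collapse into a */
/* single 0-99 entry; the absorbed lock is marked DEAD and freed. */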
4384 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4385 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4386 continue;
4387 }
4388 if (nflp->nfl_owner != nlop) {
4389 continue;
4390 }
4391 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
4392 continue;
4393 }
4394 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
4395 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4396 (coalnflp->nfl_type == nflp->nfl_type) &&
4397 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
4398 coalnflp->nfl_end = nflp->nfl_end;
4399 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4400 lck_mtx_lock(&nlop->nlo_lock);
4401 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4402 lck_mtx_unlock(&nlop->nlo_lock);
4403 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4404 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4405 (coalnflp->nfl_type == nflp->nfl_type) &&
4406 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4407 coalnflp->nfl_start = nflp->nfl_start;
4408 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4409 lck_mtx_lock(&nlop->nlo_lock);
4410 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4411 lck_mtx_unlock(&nlop->nlo_lock);
4412 }
4413 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
4414 continue;
4415 }
4416 if (nflp->nfl_blockcnt) {
4417 /* wake up anyone blocked on this lock */
4418 wakeup(nflp);
4419 } else {
4420 /* remove nflp from lock list and destroy */
4421 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4422 nfs_file_lock_destroy(nflp);
4423 }
4424 }
4425 }
4426
4427 lck_mtx_unlock(&np->n_openlock);
4428 nfs_open_state_clear_busy(np);
4429
4430 if (inuse) {
4431 nfs_mount_state_in_use_end(nmp, error);
4432 }
4433 if (nflp2) {
4434 nfs_file_lock_destroy(nflp2);
4435 }
4436 return error;
4437}
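/*
 * Illustrative caller (user space): a blocking exclusive byte-range lock
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 100,
 *	    .l_len = 100, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * arrives via nfs_vnop_advlock() below as op F_SETLK with F_WAIT set
 * (promoted there to F_SETLKW), style NFS_FILE_LOCK_STYLE_POSIX,
 * start 100, end 199.
 */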
4438
4439/*
4440 * Release all (same style) locks within the given range.
4441 */
4442int
4443nfs_advlock_unlock(
4444 nfsnode_t np,
4445 struct nfs_open_file *nofp
4446#if !CONFIG_NFS4
4447 __unused
4448#endif
4449 ,
4450 struct nfs_lock_owner *nlop,
4451 uint64_t start,
4452 uint64_t end,
4453 int style,
4454 vfs_context_t ctx)
4455{
4456 struct nfsmount *nmp;
4457 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4458 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4459
4460 nmp = NFSTONMP(np);
4461 if (nfs_mount_gone(nmp)) {
4462 return ENXIO;
4463 }
4464
4465restart:
4466 if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
4467 return error;
4468 }
4469#if CONFIG_NFS4
4470 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4471 nfs_mount_state_in_use_end(nmp, 0);
4472 error = nfs4_reopen(nofp, NULL);
4473 if (error) {
4474 return error;
4475 }
4476 goto restart;
4477 }
4478#endif
4479 if ((error = nfs_open_state_set_busy(np, NULL))) {
4480 nfs_mount_state_in_use_end(nmp, error);
4481 return error;
4482 }
4483
4484 lck_mtx_lock(&np->n_openlock);
4485 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4486 /*
4487 * We may need to allocate a new lock if an existing lock gets split.
4488 * So, we first scan the list to check for a split, and if there's
4489 * going to be one, we'll allocate one now.
4490 */
4491 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4492 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4493 continue;
4494 }
4495 if (nflp->nfl_owner != nlop) {
4496 continue;
4497 }
4498 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4499 continue;
4500 }
4501 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4502 continue;
4503 }
4504 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4505 willsplit = 1;
4506 break;
4507 }
4508 }
4509 if (willsplit) {
4510 lck_mtx_unlock(&np->n_openlock);
4511 nfs_open_state_clear_busy(np);
4512 nfs_mount_state_in_use_end(nmp, 0);
4513 newnflp = nfs_file_lock_alloc(nlop);
4514 if (!newnflp) {
4515 return ENOMEM;
4516 }
4517 goto restart;
4518 }
4519 }
4520
4521 /*
4522 * Free all of our locks in the given range.
4523 *
4524 * Note that this process requires sending requests to the server.
4525 * Because of this, we will release the n_openlock while performing
4526 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4527 * locks from changing underneath us. However, other entries in the
4528 * list may be removed. So we need to be careful walking the list.
4529 */
4530
4531 /*
4532 * Don't unlock ranges that are held by other-style locks.
4533 * If style is POSIX, don't send any unlock RPCs while an flock-style
4534 * lock is held. If we unlock an flock, don't send unlock RPCs for any
4535 * POSIX-style ranges held - instead send unlocks for the ranges not held.
4536 */
4537 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4538 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4539 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
4540 send_unlock_rpcs = 0;
4541 }
4542 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4543 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4544 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4545 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4546 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4547 uint64_t s = 0;
4548 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4549 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4550 while (!delegated && nflp) {
4551 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4552 /* unlock the range preceding this lock */
4553 lck_mtx_unlock(&np->n_openlock);
4554 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
4555 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4556 if (nfs_mount_state_error_should_restart(error)) {
4557 nfs_open_state_clear_busy(np);
4558 nfs_mount_state_in_use_end(nmp, error);
4559 goto restart;
4560 }
4561 lck_mtx_lock(&np->n_openlock);
4562 if (error) {
4563 goto out;
4564 }
4565 s = nflp->nfl_end + 1;
4566 }
4567 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4568 }
4569 if (!delegated) {
4570 lck_mtx_unlock(&np->n_openlock);
4571 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4572 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4573 if (nfs_mount_state_error_should_restart(error)) {
4574 nfs_open_state_clear_busy(np);
4575 nfs_mount_state_in_use_end(nmp, error);
4576 goto restart;
4577 }
4578 lck_mtx_lock(&np->n_openlock);
4579 if (error) {
4580 goto out;
4581 }
4582 }
4583 send_unlock_rpcs = 0;
4584 }
4585
4586 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4587 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4588 continue;
4589 }
4590 if (nflp->nfl_owner != nlop) {
4591 continue;
4592 }
4593 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4594 continue;
4595 }
4596 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4597 continue;
4598 }
4599 /* here's one to unlock */
4600 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4601 /* The entire lock is being unlocked. */
4602 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4603 lck_mtx_unlock(&np->n_openlock);
4604 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4605 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4606 if (nfs_mount_state_error_should_restart(error)) {
4607 nfs_open_state_clear_busy(np);
4608 nfs_mount_state_in_use_end(nmp, error);
4609 goto restart;
4610 }
4611 lck_mtx_lock(&np->n_openlock);
4612 }
4613 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4614 if (error) {
4615 break;
4616 }
4617 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4618 lck_mtx_lock(&nlop->nlo_lock);
4619 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4620 lck_mtx_unlock(&nlop->nlo_lock);
4621 /* lock will be destroyed below, if no waiters */
4622 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4623 /* We're unlocking a range in the middle of a lock. */
4624 /* The current lock will be split into two locks. */
4625 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4626 lck_mtx_unlock(&np->n_openlock);
4627 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4628 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4629 if (nfs_mount_state_error_should_restart(error)) {
4630 nfs_open_state_clear_busy(np);
4631 nfs_mount_state_in_use_end(nmp, error);
4632 goto restart;
4633 }
4634 lck_mtx_lock(&np->n_openlock);
4635 }
4636 if (error) {
4637 break;
4638 }
4639 /* update locks and insert new lock after current lock */
4640 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4641 newnflp->nfl_type = nflp->nfl_type;
4642 newnflp->nfl_start = end + 1;
4643 newnflp->nfl_end = nflp->nfl_end;
4644 nflp->nfl_end = start - 1;
4645 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4646 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4647 nextnflp = newnflp;
4648 newnflp = NULL;
4649 } else if (start > nflp->nfl_start) {
4650 /* We're unlocking the end of a lock. */
4651 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4652 lck_mtx_unlock(&np->n_openlock);
4653 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4654 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4655 if (nfs_mount_state_error_should_restart(error)) {
4656 nfs_open_state_clear_busy(np);
4657 nfs_mount_state_in_use_end(nmp, error);
4658 goto restart;
4659 }
4660 lck_mtx_lock(&np->n_openlock);
4661 }
4662 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4663 if (error) {
4664 break;
4665 }
4666 nflp->nfl_end = start - 1;
4667 } else if (end < nflp->nfl_end) {
4668 /* We're unlocking the start of a lock. */
4669 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4670 lck_mtx_unlock(&np->n_openlock);
4671 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4672 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4673 if (nfs_mount_state_error_should_restart(error)) {
4674 nfs_open_state_clear_busy(np);
4675 nfs_mount_state_in_use_end(nmp, error);
4676 goto restart;
4677 }
4678 lck_mtx_lock(&np->n_openlock);
4679 }
4680 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4681 if (error) {
4682 break;
4683 }
4684 nflp->nfl_start = end + 1;
4685 }
4686 if (nflp->nfl_blockcnt) {
4687 /* wake up anyone blocked on this lock */
4688 wakeup(nflp);
4689 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4690 /* remove nflp from lock list and destroy */
4691 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4692 nfs_file_lock_destroy(nflp);
4693 }
4694 }
4695out:
4696 lck_mtx_unlock(&np->n_openlock);
4697 nfs_open_state_clear_busy(np);
4698 nfs_mount_state_in_use_end(nmp, 0);
4699
4700 if (newnflp) {
4701 nfs_file_lock_destroy(newnflp);
4702 }
4703 return error;
4704}
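/*
 * Worked example (hypothetical): holding a POSIX lock over 0-199 and
 * unlocking 50-99 sends a single LOCKU for 50-99, trims the held lock
 * to 0-49, and inserts the preallocated newnflp as 100-199 - the
 * "split" case the initial scan above checks for.
 */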
4705
4706/*
4707 * NFS advisory file locking (serves v2/v3 as well as v4 mounts)
4708 */
4709int
4710nfs_vnop_advlock(
4711 struct vnop_advlock_args /* {
4712 * struct vnodeop_desc *a_desc;
4713 * vnode_t a_vp;
4714 * caddr_t a_id;
4715 * int a_op;
4716 * struct flock *a_fl;
4717 * int a_flags;
4718 * vfs_context_t a_context;
4719 * } */*ap)
4720{
4721 vnode_t vp = ap->a_vp;
4722 nfsnode_t np = VTONFS(ap->a_vp);
4723 struct flock *fl = ap->a_fl;
4724 int op = ap->a_op;
4725 int flags = ap->a_flags;
4726 vfs_context_t ctx = ap->a_context;
4727 struct nfsmount *nmp;
4728 struct nfs_open_owner *noop = NULL;
4729 struct nfs_open_file *nofp = NULL;
4730 struct nfs_lock_owner *nlop = NULL;
4731 off_t lstart;
4732 uint64_t start, end;
4733 int error = 0, modified, style;
4734 enum vtype vtype;
4735#define OFF_MAX QUAD_MAX
4736
4737 nmp = VTONMP(ap->a_vp);
4738 if (nfs_mount_gone(nmp)) {
4739 return ENXIO;
4740 }
4741 lck_mtx_lock(&nmp->nm_lock);
4742 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4743 lck_mtx_unlock(&nmp->nm_lock);
4744 return ENOTSUP;
4745 }
4746 lck_mtx_unlock(&nmp->nm_lock);
4747
4748 if (np->n_flag & NREVOKE) {
4749 return EIO;
4750 }
4751 vtype = vnode_vtype(ap->a_vp);
4752 if (vtype == VDIR) { /* ignore lock requests on directories */
4753 return 0;
4754 }
4755 if (vtype != VREG) { /* anything other than regular files is invalid */
4756 return EINVAL;
4757 }
4758
4759 /* Convert the flock structure into a start and end. */
4760 switch (fl->l_whence) {
4761 case SEEK_SET:
4762 case SEEK_CUR:
4763 /*
4764 * Caller is responsible for adding any necessary offset
4765 * to fl->l_start when SEEK_CUR is used.
4766 */
4767 lstart = fl->l_start;
4768 break;
4769 case SEEK_END:
4770 /* need to flush, and refetch attributes to make */
4771 /* sure we have the correct end of file offset */
4772 if ((error = nfs_node_lock(np))) {
4773 return error;
4774 }
4775 modified = (np->n_flag & NMODIFIED);
4776 nfs_node_unlock(np);
4777 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
4778 return error;
4779 }
4780 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4781 return error;
4782 }
4783 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4784 if ((np->n_size > OFF_MAX) ||
4785 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
4786 error = EOVERFLOW;
4787 }
4788 lstart = np->n_size + fl->l_start;
4789 nfs_data_unlock(np);
4790 if (error) {
4791 return error;
4792 }
4793 break;
4794 default:
4795 return EINVAL;
4796 }
4797 if (lstart < 0) {
4798 return EINVAL;
4799 }
4800 start = lstart;
4801 if (fl->l_len == 0) {
4802 end = UINT64_MAX;
4803 } else if (fl->l_len > 0) {
4804 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4805 return EOVERFLOW;
4806 }
4807 end = start - 1 + fl->l_len;
4808 } else { /* l_len is negative */
4809 if ((lstart + fl->l_len) < 0) {
4810 return EINVAL;
4811 }
4812 end = start - 1;
4813 start += fl->l_len;
4814 }
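/*
 * e.g. (illustrative): l_start 100 with l_len 50 yields [100, 149];
 * l_len 0 yields [100, UINT64_MAX] (lock to end of file); and l_len -50
 * yields [50, 99], since a negative l_len locks the range that ends
 * just before l_start.
 */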
4815 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4816 return EINVAL;
4817 }
4818
4819 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4820 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4821 return EINVAL;
4822 }
4823
4824 /* find the lock owner, allocating one unless this is an unlock */
4825 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4826 if (!nlop) {
4827 error = (op == F_UNLCK) ? 0 : ENOMEM;
4828 if (error) {
4829 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4830 }
4831 goto out;
4832 }
4833
4834 if (op == F_GETLK) {
4835 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4836 } else {
4837 /* find the open owner */
4838 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4839 if (!noop) {
4840 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4841 error = EPERM;
4842 goto out;
4843 }
4844 /* find the open file */
4845#if CONFIG_NFS4
4846restart:
4847#endif
4848 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4849 if (error) {
4850 error = EBADF;
4851 }
4852 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4853 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4854 error = EIO;
4855 }
4856#if CONFIG_NFS4
4857 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4858 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4859 nofp = NULL;
4860 if (!error) {
4861 goto restart;
4862 }
4863 }
4864#endif
4865 if (error) {
4866 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4867 goto out;
4868 }
4869 if (op == F_UNLCK) {
4870 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4871 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4872 if ((op == F_SETLK) && (flags & F_WAIT)) {
4873 op = F_SETLKW;
4874 }
4875 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4876 } else {
4877 /* not getlk, unlock or lock? */
4878 error = EINVAL;
4879 }
4880 }
4881
4882out:
4883 if (nlop) {
4884 nfs_lock_owner_rele(nlop);
4885 }
4886 if (noop) {
4887 nfs_open_owner_rele(noop);
4888 }
4889 return error;
4890}
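/*
 * Note: flock(2)-style requests are marked with F_FLOCK in flags, which
 * is why the code above maps them to NFS_FILE_LOCK_STYLE_FLOCK and
 * insists on a whole-file range (start 0, end UINT64_MAX); byte-range
 * fcntl(2) requests take the POSIX style.
 */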
4891
4892/*
4893 * Check if an open owner holds any locks on a file.
4894 */
4895int
4896nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4897{
4898 struct nfs_lock_owner *nlop;
4899
4900 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4901 if (nlop->nlo_open_owner != noop) {
4902 continue;
4903 }
4904 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
4905 break;
4906 }
4907 }
4908 return nlop ? 1 : 0;
4909}
4910
4911#if CONFIG_NFS4
4912/*
4913 * Reopen simple (no deny, no locks) open state that was lost.
4914 */
4915int
4916nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4917{
4918 struct nfs_open_owner *noop = nofp->nof_owner;
4919 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4920 nfsnode_t np = nofp->nof_np;
4921 vnode_t vp = NFSTOV(np);
4922 vnode_t dvp = NULL;
4923 struct componentname cn;
4924 const char *vname = NULL;
4925 const char *name = NULL;
4926 uint32_t namelen;
4927 char smallname[128];
4928 char *filename = NULL;
4929 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4930 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4931
4932 lck_mtx_lock(&nofp->nof_lock);
4933 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4934 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
4935 break;
4936 }
4937 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
4938 slpflag = 0;
4939 }
4940 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4941 lck_mtx_unlock(&nofp->nof_lock);
4942 return error;
4943 }
4944 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4945 lck_mtx_unlock(&nofp->nof_lock);
4946
4947 nfs_node_lock_force(np);
4948 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4949 /*
4950 * The node's been sillyrenamed, so we need to use
4951 * the sillyrename directory/name to do the open.
4952 */
4953 struct nfs_sillyrename *nsp = np->n_sillyrename;
4954 dvp = NFSTOV(nsp->nsr_dnp);
4955 if ((error = vnode_get(dvp))) {
4956 dvp = NULLVP;
4957 nfs_node_unlock(np);
4958 goto out;
4959 }
4960 name = nsp->nsr_name;
4961 } else {
4962 /*
4963 * [sigh] We can't trust VFS to get the parent right for named
4964 * attribute nodes. (It likes to reparent the nodes after we've
4965 * created them.) Luckily we can probably get the right parent
4966 * from the n_parent we have stashed away.
4967 */
4968 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4969 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
4970 dvp = NULL;
4971 }
4972 if (!dvp) {
4973 dvp = vnode_getparent(vp);
4974 }
4975 vname = vnode_getname(vp);
4976 if (!dvp || !vname) {
4977 if (!error) {
4978 error = EIO;
4979 }
4980 nfs_node_unlock(np);
4981 goto out;
4982 }
4983 name = vname;
4984 }
4985 filename = &smallname[0];
4986 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4987 if (namelen >= sizeof(smallname)) {
4988 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
4989 if (!filename) {
4990 error = ENOMEM;
4991 goto out;
4992 }
4993 snprintf(filename, namelen + 1, "%s", name);
4994 }
4995 nfs_node_unlock(np);
4996 bzero(&cn, sizeof(cn));
4997 cn.cn_nameptr = filename;
4998 cn.cn_namelen = namelen;
4999
5000restart:
5001 done = 0;
5002 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
5003 goto out;
5004 }
5005
5006 if (nofp->nof_rw) {
5007 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
5008 }
5009 if (!error && nofp->nof_w) {
5010 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
5011 }
5012 if (!error && nofp->nof_r) {
5013 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
5014 }
5015
5016 if (nfs_mount_state_in_use_end(nmp, error)) {
5017 if (error == NFSERR_GRACE) {
5018 goto restart;
5019 }
5020 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
5021 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5022 error = 0;
5023 goto out;
5024 }
5025 done = 1;
5026out:
5027 if (error && (error != EINTR) && (error != ERESTART)) {
5028 nfs_revoke_open_state_for_node(np);
5029 }
5030 lck_mtx_lock(&nofp->nof_lock);
5031 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
5032 if (done) {
5033 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5034 } else if (error) {
5035 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
5036 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
5037 }
5038 lck_mtx_unlock(&nofp->nof_lock);
5039 if (filename && (filename != &smallname[0])) {
5040 FREE(filename, M_TEMP);
5041 }
5042 if (vname) {
5043 vnode_putname(vname);
5044 }
5045 if (dvp != NULLVP) {
5046 vnode_put(dvp);
5047 }
5048 return error;
5049}
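/*
 * Callers that find NFS_OPEN_FILE_REOPEN set (e.g. nfs_advlock_setlock()
 * and nfs_advlock_unlock() above) drop their mount-state reference and
 * call nfs4_reopen() before retrying, since the open's stateid must be
 * reestablished before any lock RPC can reference it.
 */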
5050
5051/*
5052 * Send a normal OPEN RPC to open/create a file.
5053 */
5054int
5055nfs4_open_rpc(
5056 struct nfs_open_file *nofp,
5057 vfs_context_t ctx,
5058 struct componentname *cnp,
5059 struct vnode_attr *vap,
5060 vnode_t dvp,
5061 vnode_t *vpp,
5062 int create,
5063 int share_access,
5064 int share_deny)
5065{
5066 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5067 cnp, vap, dvp, vpp, create, share_access, share_deny);
5068}
5069
5070/*
5071 * Send an OPEN RPC to reopen a file.
5072 */
5073int
5074nfs4_open_reopen_rpc(
5075 struct nfs_open_file *nofp,
5076 thread_t thd,
5077 kauth_cred_t cred,
5078 struct componentname *cnp,
5079 vnode_t dvp,
5080 vnode_t *vpp,
5081 int share_access,
5082 int share_deny)
5083{
5084 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
5085}
5086
5087/*
5088 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5089 */
5090int
5091nfs4_open_confirm_rpc(
5092 struct nfsmount *nmp,
5093 nfsnode_t dnp,
5094 u_char *fhp,
5095 int fhlen,
5096 struct nfs_open_owner *noop,
5097 nfs_stateid *sid,
5098 thread_t thd,
5099 kauth_cred_t cred,
5100 struct nfs_vattr *nvap,
5101 uint64_t *xidp)
5102{
5103 struct nfsm_chain nmreq, nmrep;
5104 int error = 0, status, numops;
5105 struct nfsreq_secinfo_args si;
5106
5107 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5108 nfsm_chain_null(&nmreq);
5109 nfsm_chain_null(&nmrep);
5110
5111 // PUTFH, OPEN_CONFIRM, GETATTR
5112 numops = 3;
5113 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5114 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
5115 numops--;
5116 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5117 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5118 numops--;
5119 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5120 nfsm_chain_add_stateid(error, &nmreq, sid);
5121 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5122 numops--;
5123 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5124 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5125 nfsm_chain_build_done(error, &nmreq);
5126 nfsm_assert(error, (numops == 0), EPROTO);
5127 nfsmout_if(error);
5128 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5129
5130 nfsm_chain_skip_tag(error, &nmrep);
5131 nfsm_chain_get_32(error, &nmrep, numops);
5132 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5133 nfsmout_if(error);
5134 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5135 nfs_owner_seqid_increment(noop, NULL, error);
5136 nfsm_chain_get_stateid(error, &nmrep, sid);
5137 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5138 nfsmout_if(error);
5139 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5140nfsmout:
5141 nfsm_chain_cleanup(&nmreq);
5142 nfsm_chain_cleanup(&nmrep);
5143 return error;
5144}
5145
5146/*
5147 * common OPEN RPC code
5148 *
5149 * If create is set, ctx must be passed in.
5150 * Returns a node on success if no node passed in.
5151 */
5152int
5153nfs4_open_rpc_internal(
5154 struct nfs_open_file *nofp,
5155 vfs_context_t ctx,
5156 thread_t thd,
5157 kauth_cred_t cred,
5158 struct componentname *cnp,
5159 struct vnode_attr *vap,
5160 vnode_t dvp,
5161 vnode_t *vpp,
5162 int create,
5163 int share_access,
5164 int share_deny)
5165{
5166 struct nfsmount *nmp;
5167 struct nfs_open_owner *noop = nofp->nof_owner;
5168 struct nfs_vattr *nvattr;
5169 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
5170 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
5171 u_int64_t xid, savedxid = 0;
5172 nfsnode_t dnp = VTONFS(dvp);
5173 nfsnode_t np, newnp = NULL;
5174 vnode_t newvp = NULL;
5175 struct nfsm_chain nmreq, nmrep;
5176 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5177 uint32_t rflags, delegation, recall;
5178 struct nfs_stateid stateid, dstateid, *sid;
5179 fhandle_t *fh;
5180 struct nfsreq *req;
5181 struct nfs_dulookup *dul;
5182 char sbuf[64], *s;
5183 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5184 struct kauth_ace ace;
5185 struct nfsreq_secinfo_args si;
5186
5187 if (create && !ctx) {
5188 return EINVAL;
5189 }
5190
5191 nmp = VTONMP(dvp);
5192 if (nfs_mount_gone(nmp)) {
5193 return ENXIO;
5194 }
5195 nfsvers = nmp->nm_vers;
5196 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5197 bzero(&dstateid, sizeof(dstateid));
5198 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5199 return EINVAL;
5200 }
5201
5202 np = *vpp ? VTONFS(*vpp) : NULL;
5203 if (create && vap) {
5204 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5205 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5206 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5207 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5208 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
5209 vap->va_vaflags |= VA_UTIMES_NULL;
5210 }
5211 } else {
5212 exclusive = gotuid = gotgid = 0;
5213 }
5214 if (nofp) {
5215 sid = &nofp->nof_stateid;
5216 } else {
5217 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5218 sid = &stateid;
5219 }
5220
5221 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5222 return error;
5223 }
5224
5225 fh = zalloc(nfs_fhandle_zone);
5226 req = zalloc(nfs_req_zone);
5227 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
5228 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5229
5230again:
5231 rflags = delegation = recall = 0;
5232 ace.ace_flags = 0;
5233 s = sbuf;
5234 slen = sizeof(sbuf);
5235 NVATTR_INIT(nvattr);
5236 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
5237
5238 nfsm_chain_null(&nmreq);
5239 nfsm_chain_null(&nmrep);
5240
5241 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5242 numops = 6;
5243 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
5244 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
5245 numops--;
5246 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5247 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5248 numops--;
5249 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5250 numops--;
5251 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5252 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5253 nfsm_chain_add_32(error, &nmreq, share_access);
5254 nfsm_chain_add_32(error, &nmreq, share_deny);
5255 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
5256 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5257 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
5258 nfsm_chain_add_32(error, &nmreq, create);
5259 if (create) {
5260 if (exclusive) {
5261 static uint32_t create_verf; // XXX need a better verifier
5262 create_verf++;
5263 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5264 /* insert 64 bit verifier */
5265 nfsm_chain_add_32(error, &nmreq, create_verf);
5266 nfsm_chain_add_32(error, &nmreq, create_verf);
5267 } else {
5268 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5269 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5270 }
5271 }
5272 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
5273 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5274 numops--;
5275 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5276 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5277 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5278 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5279 numops--;
5280 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5281 numops--;
5282 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5283 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5284 nfsm_chain_build_done(error, &nmreq);
5285 nfsm_assert(error, (numops == 0), EPROTO);
5286 if (!error) {
5287 error = busyerror = nfs_node_set_busy(dnp, thd);
5288 }
5289 nfsmout_if(error);
5290
5291 if (create && !namedattrs) {
5292 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5293 }
5294
5295 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
5296 if (!error) {
5297 if (create && !namedattrs) {
5298 nfs_dulookup_start(dul, dnp, ctx);
5299 }
5300 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5301 savedxid = xid;
5302 }
5303
5304 if (create && !namedattrs) {
5305 nfs_dulookup_finish(dul, dnp, ctx);
5306 }
5307
5308 if ((lockerror = nfs_node_lock(dnp))) {
5309 error = lockerror;
5310 }
5311 nfsm_chain_skip_tag(error, &nmrep);
5312 nfsm_chain_get_32(error, &nmrep, numops);
5313 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5314 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5315 nfsmout_if(error);
5316 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5317 nfs_owner_seqid_increment(noop, NULL, error);
5318 nfsm_chain_get_stateid(error, &nmrep, sid);
5319 nfsm_chain_check_change_info(error, &nmrep, dnp);
5320 nfsm_chain_get_32(error, &nmrep, rflags);
5321 bmlen = NFS_ATTR_BITMAP_LEN;
5322 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5323 nfsm_chain_get_32(error, &nmrep, delegation);
5324 if (!error) {
5325 switch (delegation) {
5326 case NFS_OPEN_DELEGATE_NONE:
5327 break;
5328 case NFS_OPEN_DELEGATE_READ:
5329 case NFS_OPEN_DELEGATE_WRITE:
5330 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5331 nfsm_chain_get_32(error, &nmrep, recall);
5332 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4: 3 words) XXX
5333 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5334 }
5335 /* if we have any trouble accepting the ACE, just invalidate it */
5336 ace_type = ace_flags = ace_mask = len = 0;
5337 nfsm_chain_get_32(error, &nmrep, ace_type);
5338 nfsm_chain_get_32(error, &nmrep, ace_flags);
5339 nfsm_chain_get_32(error, &nmrep, ace_mask);
5340 nfsm_chain_get_32(error, &nmrep, len);
5341 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5342 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5343 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5344 if (!error && (len >= slen)) {
5345 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5346 if (s) {
5347 slen = len + 1;
5348 } else {
5349 ace.ace_flags = 0;
5350 }
5351 }
5352 if (s) {
5353 nfsm_chain_get_opaque(error, &nmrep, len, s);
5354 } else {
5355 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5356 }
5357 if (!error && s) {
5358 s[len] = '\0';
5359 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5360 ace.ace_flags = 0;
5361 }
5362 }
5363 if (error || !s) {
5364 ace.ace_flags = 0;
5365 }
5366 if (s && (s != sbuf)) {
5367 FREE(s, M_TEMP);
5368 }
5369 break;
5370 default:
5371 error = EBADRPC;
5372 break;
5373 }
5374 }
5375 /* At this point, if we have no error, the object was created/opened. */
5376 open_error = error;
5377 nfsmout_if(error);
5378 if (create && vap && !exclusive) {
5379 nfs_vattr_set_supported(bitmap, vap);
5380 }
5381 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5382 nfsmout_if(error);
5383 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5384 nfsmout_if(error);
5385 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5386 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
5387 error = EBADRPC;
5388 goto nfsmout;
5389 }
5390 if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5391 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5392 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5393 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5394 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
5395 }
5396 }
5397 /* directory attributes: if we don't get them, make sure to invalidate */
5398 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5399 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5400 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
5401 if (error) {
5402 NATTRINVALIDATE(dnp);
5403 }
5404 nfsmout_if(error);
5405
5406 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5407 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5408 }
5409
5410 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5411 nfs_node_unlock(dnp);
5412 lockerror = ENOENT;
5413 NVATTR_CLEANUP(nvattr);
5414 error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid);
5415 nfsmout_if(error);
5416 savedxid = xid;
5417 if ((lockerror = nfs_node_lock(dnp))) {
5418 error = lockerror;
5419 }
5420 }
5421
5422nfsmout:
5423 nfsm_chain_cleanup(&nmreq);
5424 nfsm_chain_cleanup(&nmrep);
5425
5426 if (!lockerror && create) {
5427 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5428 dnp->n_flag &= ~NNEGNCENTRIES;
5429 cache_purge_negatives(dvp);
5430 }
5431 dnp->n_flag |= NMODIFIED;
5432 nfs_node_unlock(dnp);
5433 lockerror = ENOENT;
5434 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
5435 }
5436 if (!lockerror) {
5437 nfs_node_unlock(dnp);
5438 }
5439 if (!error && !np && fh->fh_len) {
5440 /* create the vnode with the filehandle and attributes */
5441 xid = savedxid;
5442 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp);
5443 if (!error) {
5444 newvp = NFSTOV(newnp);
5445 }
5446 }
5447 NVATTR_CLEANUP(nvattr);
5448 if (!busyerror) {
5449 nfs_node_clear_busy(dnp);
5450 }
5451 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5452 if (!np) {
5453 np = newnp;
5454 }
5455 if (!error && np && !recall) {
5456 /* stuff the delegation state in the node */
5457 lck_mtx_lock(&np->n_openlock);
5458 np->n_openflags &= ~N_DELEG_MASK;
5459 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5460 np->n_dstateid = dstateid;
5461 np->n_dace = ace;
5462 if (np->n_dlink.tqe_next == NFSNOLIST) {
5463 lck_mtx_lock(&nmp->nm_lock);
5464 if (np->n_dlink.tqe_next == NFSNOLIST) {
5465 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5466 }
5467 lck_mtx_unlock(&nmp->nm_lock);
5468 }
5469 lck_mtx_unlock(&np->n_openlock);
5470 } else {
5471 /* give the delegation back */
5472 if (np) {
5473 if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5474 /* update delegation state and return it */
5475 lck_mtx_lock(&np->n_openlock);
5476 np->n_openflags &= ~N_DELEG_MASK;
5477 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5478 np->n_dstateid = dstateid;
5479 np->n_dace = ace;
5480 if (np->n_dlink.tqe_next == NFSNOLIST) {
5481 lck_mtx_lock(&nmp->nm_lock);
5482 if (np->n_dlink.tqe_next == NFSNOLIST) {
5483 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5484 }
5485 lck_mtx_unlock(&nmp->nm_lock);
5486 }
5487 lck_mtx_unlock(&np->n_openlock);
5488 /* don't need to send a separate delegreturn for fh */
5489 fh->fh_len = 0;
5490 }
5491 /* return np's current delegation */
5492 nfs4_delegation_return(np, 0, thd, cred);
5493 }
5494 if (fh->fh_len) { /* return fh's delegation if it wasn't for np */
5495 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
5496 }
5497 }
5498 }
5499 if (error) {
5500 if (exclusive && (error == NFSERR_NOTSUPP)) {
5501 exclusive = 0;
5502 goto again;
5503 }
5504 if (newvp) {
5505 nfs_node_unlock(newnp);
5506 vnode_put(newvp);
5507 }
5508 } else if (create) {
5509 nfs_node_unlock(newnp);
5510 if (exclusive) {
5511 error = nfs4_setattr_rpc(newnp, vap, ctx);
5512 if (error && (gotuid || gotgid)) {
5513 /* it's possible the server didn't like our attempt to set IDs. */
5514 /* so, let's try it again without those */
5515 VATTR_CLEAR_ACTIVE(vap, va_uid);
5516 VATTR_CLEAR_ACTIVE(vap, va_gid);
5517 error = nfs4_setattr_rpc(newnp, vap, ctx);
5518 }
5519 }
5520 if (error) {
5521 vnode_put(newvp);
5522 } else {
5523 *vpp = newvp;
5524 }
5525 }
5526 nfs_open_owner_clear_busy(noop);
5527 NFS_ZFREE(nfs_fhandle_zone, fh);
5528 NFS_ZFREE(nfs_req_zone, req);
5529 FREE(dul, M_TEMP);
5530 FREE(nvattr, M_TEMP);
5531 return error;
5532}
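
/*
 * The open/create COMPOUND above follows the file's standard marshalling
 * convention: declare numops up front in the compound header, decrement it
 * as each op is encoded, then nfsm_assert() that it reached zero.  What
 * follows is a standalone userspace sketch of that convention only — plain
 * buffers instead of the kernel's nfsm_chain, op numbers per RFC 7530.
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl() */

#define OP_PUTFH   22   /* op numbers per RFC 7530 */
#define OP_SAVEFH  32
#define OP_GETATTR 9

struct xdrbuf {
	uint8_t data[1024]; /* no bounds checks: sketch only */
	size_t len;
	int numops;         /* ops still owed to the compound header */
};

static void
xdr_add_32(struct xdrbuf *xb, uint32_t v)
{
	v = htonl(v);   /* XDR is big-endian, 4-byte aligned */
	memcpy(xb->data + xb->len, &v, 4);
	xb->len += 4;
}

/* COMPOUND header: tag<>, minorversion, numops */
static void
xdr_compound_header(struct xdrbuf *xb, const char *tag, int numops)
{
	uint32_t tlen = (uint32_t)strlen(tag);
	size_t pad = ((tlen + 3) & ~3u) - tlen;

	xdr_add_32(xb, tlen);
	memcpy(xb->data + xb->len, tag, tlen);
	memset(xb->data + xb->len + tlen, 0, pad); /* XDR zero padding */
	xb->len += tlen + pad;
	xdr_add_32(xb, 0); /* minorversion 0 */
	xdr_add_32(xb, (uint32_t)numops);
	xb->numops = numops;
}

static void
xdr_add_op(struct xdrbuf *xb, uint32_t op)
{
	xb->numops--; /* mirrors the numops-- before each op above */
	xdr_add_32(xb, op);
}

int
main(void)
{
	struct xdrbuf xb = { .len = 0, .numops = 0 };

	xdr_compound_header(&xb, "sketch", 3);
	xdr_add_op(&xb, OP_PUTFH);   /* the filehandle opaque would follow */
	xdr_add_op(&xb, OP_SAVEFH);
	xdr_add_op(&xb, OP_GETATTR); /* the attribute bitmap would follow */
	/* the sketch equivalent of nfsm_assert(error, numops == 0, EPROTO) */
	return xb.numops == 0 ? 0 : 1;
}
#endif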
5533
5534
5535/*
5536 * Send an OPEN RPC to claim a delegated open for a file
5537 */
5538int
5539nfs4_claim_delegated_open_rpc(
5540 struct nfs_open_file *nofp,
5541 int share_access,
5542 int share_deny,
5543 int flags)
5544{
5545 struct nfsmount *nmp;
5546 struct nfs_open_owner *noop = nofp->nof_owner;
5547 struct nfs_vattr *nvattr;
5548 int error = 0, lockerror = ENOENT, status;
5549 int nfsvers, numops;
5550 u_int64_t xid;
5551 nfsnode_t np = nofp->nof_np;
5552 struct nfsm_chain nmreq, nmrep;
5553 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5554 uint32_t rflags = 0, delegation, recall = 0;
5555 fhandle_t *fh;
5556 struct nfs_stateid dstateid;
5557 char sbuf[64], *s = sbuf;
5558 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5559 struct kauth_ace ace;
5560 vnode_t dvp = NULL;
5561 const char *vname = NULL;
5562 const char *name = NULL;
5563 uint32_t namelen;
5564 char smallname[128];
5565 char *filename = NULL;
5566 struct nfsreq_secinfo_args si;
5567
5568 nmp = NFSTONMP(np);
5569 if (nfs_mount_gone(nmp)) {
5570 return ENXIO;
5571 }
5572 fh = zalloc(nfs_fhandle_zone);
5573 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5574 nfsvers = nmp->nm_vers;
5575
5576 nfs_node_lock_force(np);
5577 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5578 /*
5579 * The node's been sillyrenamed, so we need to use
5580 * the sillyrename directory/name to do the open.
5581 */
5582 struct nfs_sillyrename *nsp = np->n_sillyrename;
5583 dvp = NFSTOV(nsp->nsr_dnp);
5584 if ((error = vnode_get(dvp))) {
5585 dvp = NULLVP;
5586 nfs_node_unlock(np);
5587 goto out;
5588 }
5589 name = nsp->nsr_name;
5590 } else {
5591 /*
5592 * [sigh] We can't trust VFS to get the parent right for named
5593 * attribute nodes. (It likes to reparent the nodes after we've
5594 * created them.) Luckily we can probably get the right parent
5595 * from the n_parent we have stashed away.
5596 */
5597 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5598 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5599 dvp = NULL;
5600 }
5601 if (!dvp) {
5602 dvp = vnode_getparent(NFSTOV(np));
5603 }
5604 vname = vnode_getname(NFSTOV(np));
5605 if (!dvp || !vname) {
5606 if (!error) {
5607 error = EIO;
5608 }
5609 nfs_node_unlock(np);
5610 goto out;
5611 }
5612 name = vname;
5613 }
5614 filename = &smallname[0];
5615 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5616 if (namelen >= sizeof(smallname)) {
5617 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
5618 if (!filename) {
5619 error = ENOMEM;
5620 nfs_node_unlock(np);
5621 goto out;
5622 }
5623 snprintf(filename, namelen + 1, "%s", name);
5624 }
5625 nfs_node_unlock(np);
5626
5627 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5628 goto out;
5629 }
5630 NVATTR_INIT(nvattr);
5631 delegation = NFS_OPEN_DELEGATE_NONE;
5632 dstateid = np->n_dstateid;
5633 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5634
5635 nfsm_chain_null(&nmreq);
5636 nfsm_chain_null(&nmrep);
5637
5638 // PUTFH, OPEN, GETATTR(FH)
5639 numops = 3;
5640 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5641 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5642 numops--;
5643 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5644 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5645 numops--;
5646 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5647 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5648 nfsm_chain_add_32(error, &nmreq, share_access);
5649 nfsm_chain_add_32(error, &nmreq, share_deny);
5650 // open owner: clientid + uid
5651 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5652 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5653 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5654 // openflag4
5655 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5656 // open_claim4
5657 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5658 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5659 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5660 numops--;
5661 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5662 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5663 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5664 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5665 nfsm_chain_build_done(error, &nmreq);
5666 nfsm_assert(error, (numops == 0), EPROTO);
5667 nfsmout_if(error);
5668
5669 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5670 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
5671
5672 if ((lockerror = nfs_node_lock(np))) {
5673 error = lockerror;
5674 }
5675 nfsm_chain_skip_tag(error, &nmrep);
5676 nfsm_chain_get_32(error, &nmrep, numops);
5677 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5678 nfsmout_if(error);
5679 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5680 nfs_owner_seqid_increment(noop, NULL, error);
5681 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5682 nfsm_chain_check_change_info(error, &nmrep, np);
5683 nfsm_chain_get_32(error, &nmrep, rflags);
5684 bmlen = NFS_ATTR_BITMAP_LEN;
5685 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5686 nfsm_chain_get_32(error, &nmrep, delegation);
5687 if (!error) {
5688 switch (delegation) {
5689 case NFS_OPEN_DELEGATE_NONE:
5690 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5691 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5692 break;
5693 case NFS_OPEN_DELEGATE_READ:
5694 case NFS_OPEN_DELEGATE_WRITE:
5695 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5696 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5697 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5698 (delegation == NFS_OPEN_DELEGATE_READ))) {
5699 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5700 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5701 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5702 }
5703 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5704 nfsm_chain_get_32(error, &nmrep, recall);
5705 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4: 3 words) XXX
5706 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5707 }
5708 /* if we have any trouble accepting the ACE, just invalidate it */
5709 ace_type = ace_flags = ace_mask = len = 0;
5710 nfsm_chain_get_32(error, &nmrep, ace_type);
5711 nfsm_chain_get_32(error, &nmrep, ace_flags);
5712 nfsm_chain_get_32(error, &nmrep, ace_mask);
5713 nfsm_chain_get_32(error, &nmrep, len);
5714 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5715 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5716 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5717 if (!error && (len >= slen)) {
5718 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5719 if (s) {
5720 slen = len + 1;
5721 } else {
5722 ace.ace_flags = 0;
5723 }
5724 }
5725 if (s) {
5726 nfsm_chain_get_opaque(error, &nmrep, len, s);
5727 } else {
5728 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5729 }
5730 if (!error && s) {
5731 s[len] = '\0';
5732 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5733 ace.ace_flags = 0;
5734 }
5735 }
5736 if (error || !s) {
5737 ace.ace_flags = 0;
5738 }
5739 if (s && (s != sbuf)) {
5740 FREE(s, M_TEMP);
5741 }
5742 if (!error) {
5743 /* stuff the latest delegation state in the node */
5744 lck_mtx_lock(&np->n_openlock);
5745 np->n_openflags &= ~N_DELEG_MASK;
5746 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5747 np->n_dstateid = dstateid;
5748 np->n_dace = ace;
5749 if (np->n_dlink.tqe_next == NFSNOLIST) {
5750 lck_mtx_lock(&nmp->nm_lock);
5751 if (np->n_dlink.tqe_next == NFSNOLIST) {
5752 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5753 }
5754 lck_mtx_unlock(&nmp->nm_lock);
5755 }
5756 lck_mtx_unlock(&np->n_openlock);
5757 }
5758 break;
5759 default:
5760 error = EBADRPC;
5761 break;
5762 }
5763 }
5764 nfsmout_if(error);
5765 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5766 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5767 nfsmout_if(error);
5768 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5769 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5770 error = EBADRPC;
5771 goto nfsmout;
5772 }
5773 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
5774 // XXX what if fh doesn't match the vnode we think we're re-opening?
5775 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5776 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5777 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5778 }
5779 }
5780 error = nfs_loadattrcache(np, nvattr, &xid, 1);
5781 nfsmout_if(error);
5782 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5783 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5784 }
5785nfsmout:
5786 NVATTR_CLEANUP(nvattr);
5787 FREE(nvattr, M_TEMP);
5788 NFS_ZFREE(nfs_fhandle_zone, fh);
5789 nfsm_chain_cleanup(&nmreq);
5790 nfsm_chain_cleanup(&nmrep);
5791 if (!lockerror) {
5792 nfs_node_unlock(np);
5793 }
5794 nfs_open_owner_clear_busy(noop);
5795 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5796 if (recall) {
5797 /*
5798 * We're making a delegated claim.
5799 * Don't return the delegation here in case we have more to claim.
5800 * Just make sure it's queued up to be returned.
5801 */
5802 nfs4_delegation_return_enqueue(np);
5803 }
5804 }
5805out:
5806 // if (!error)
5807 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5808 if (filename && (filename != &smallname[0])) {
5809 FREE(filename, M_TEMP);
5810 }
5811 if (vname) {
5812 vnode_putname(vname);
5813 }
5814 if (dvp != NULLVP) {
5815 vnode_put(dvp);
5816 }
5817 return error;
5818}
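
/*
 * The filename handling above (smallname + MALLOC fallback) is a common
 * idiom: format into a small stack buffer, and only when snprintf()
 * reports truncation allocate a heap buffer sized from its return value.
 * A standalone sketch of the idiom, with userspace malloc/free standing
 * in for the kernel's MALLOC/FREE:
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stdio.h>
#include <stdlib.h>

/*
 * Format `name` into `stackbuf` if it fits, else into a heap buffer sized
 * from snprintf()'s return value.  Caller releases with name_done().
 */
static char *
name_format(const char *name, char *stackbuf, size_t stacklen)
{
	int namelen = snprintf(stackbuf, stacklen, "%s", name);

	if (namelen < 0) {
		return NULL;
	}
	if ((size_t)namelen < stacklen) {
		return stackbuf;  /* fit in the caller's small buffer */
	}
	/* truncated: snprintf() reported the length it actually needed */
	char *heapbuf = malloc((size_t)namelen + 1);
	if (heapbuf) {
		snprintf(heapbuf, (size_t)namelen + 1, "%s", name);
	}
	return heapbuf;
}

static void
name_done(char *buf, char *stackbuf)
{
	if (buf && buf != stackbuf) {
		free(buf);  /* mirrors: if (filename != &smallname[0]) FREE() */
	}
}

int
main(void)
{
	char small[16];
	char *s = name_format("a-name-longer-than-sixteen-bytes", small, sizeof(small));

	printf("%s\n", s ? s : "(alloc failed)");
	name_done(s, small);
	return 0;
}
#endif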
5819
5820/*
5821 * Send an OPEN RPC to reclaim an open file.
5822 */
5823int
5824nfs4_open_reclaim_rpc(
5825 struct nfs_open_file *nofp,
5826 int share_access,
5827 int share_deny)
5828{
5829 struct nfsmount *nmp;
5830 struct nfs_open_owner *noop = nofp->nof_owner;
5831 struct nfs_vattr *nvattr;
5832 int error = 0, lockerror = ENOENT, status;
5833 int nfsvers, numops;
5834 u_int64_t xid;
5835 nfsnode_t np = nofp->nof_np;
5836 struct nfsm_chain nmreq, nmrep;
5837 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5838 uint32_t rflags = 0, delegation, recall = 0;
5839 fhandle_t *fh;
5840 struct nfs_stateid dstateid;
5841 char sbuf[64], *s = sbuf;
5842 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5843 struct kauth_ace ace;
5844 struct nfsreq_secinfo_args si;
5845
5846 nmp = NFSTONMP(np);
5847 if (nfs_mount_gone(nmp)) {
5848 return ENXIO;
5849 }
5850 nfsvers = nmp->nm_vers;
5851
5852 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5853 return error;
5854 }
5855
5856 fh = zalloc(nfs_fhandle_zone);
5857 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
5858 NVATTR_INIT(nvattr);
5859 delegation = NFS_OPEN_DELEGATE_NONE;
5860 dstateid = np->n_dstateid;
5861 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5862
5863 nfsm_chain_null(&nmreq);
5864 nfsm_chain_null(&nmrep);
5865
5866 // PUTFH, OPEN, GETATTR(FH)
5867 numops = 3;
5868 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5869 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5870 numops--;
5871 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5872 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5873 numops--;
5874 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5875 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5876 nfsm_chain_add_32(error, &nmreq, share_access);
5877 nfsm_chain_add_32(error, &nmreq, share_deny);
5878 // open owner: clientid + uid
5879 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5880 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5881 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5882 // openflag4
5883 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5884 // open_claim4
5885 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5886 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5887 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5888 NFS_OPEN_DELEGATE_NONE;
5889 nfsm_chain_add_32(error, &nmreq, delegation);
5890 delegation = NFS_OPEN_DELEGATE_NONE;
5891 numops--;
5892 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5893 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5894 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5895 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5896 nfsm_chain_build_done(error, &nmreq);
5897 nfsm_assert(error, (numops == 0), EPROTO);
5898 nfsmout_if(error);
5899
5900 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5901 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5902
5903 if ((lockerror = nfs_node_lock(np))) {
5904 error = lockerror;
5905 }
5906 nfsm_chain_skip_tag(error, &nmrep);
5907 nfsm_chain_get_32(error, &nmrep, numops);
5908 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5909 nfsmout_if(error);
5910 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5911 nfs_owner_seqid_increment(noop, NULL, error);
5912 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5913 nfsm_chain_check_change_info(error, &nmrep, np);
5914 nfsm_chain_get_32(error, &nmrep, rflags);
5915 bmlen = NFS_ATTR_BITMAP_LEN;
5916 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5917 nfsm_chain_get_32(error, &nmrep, delegation);
5918 if (!error) {
5919 switch (delegation) {
5920 case NFS_OPEN_DELEGATE_NONE:
5921 if (np->n_openflags & N_DELEG_MASK) {
5922 /*
5923 * Hey! We were supposed to get our delegation back even
5924 * if it was getting immediately recalled. Bad server!
5925 *
5926 * Just try to return the existing delegation.
5927 */
5928 // NP(np, "nfs: open reclaim didn't return delegation?");
5929 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5930 recall = 1;
5931 }
5932 break;
5933 case NFS_OPEN_DELEGATE_READ:
5934 case NFS_OPEN_DELEGATE_WRITE:
5935 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5936 nfsm_chain_get_32(error, &nmrep, recall);
5937 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit (nfs_space_limit4: 3 words) XXX
5938 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5939 }
5940 /* if we have any trouble accepting the ACE, just invalidate it */
5941 ace_type = ace_flags = ace_mask = len = 0;
5942 nfsm_chain_get_32(error, &nmrep, ace_type);
5943 nfsm_chain_get_32(error, &nmrep, ace_flags);
5944 nfsm_chain_get_32(error, &nmrep, ace_mask);
5945 nfsm_chain_get_32(error, &nmrep, len);
5946 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5947 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5948 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5949 if (!error && (len >= slen)) {
5950 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5951 if (s) {
5952 slen = len + 1;
5953 } else {
5954 ace.ace_flags = 0;
5955 }
5956 }
5957 if (s) {
5958 nfsm_chain_get_opaque(error, &nmrep, len, s);
5959 } else {
5960 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5961 }
5962 if (!error && s) {
5963 s[len] = '\0';
5964 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5965 ace.ace_flags = 0;
5966 }
5967 }
5968 if (error || !s) {
5969 ace.ace_flags = 0;
5970 }
5971 if (s && (s != sbuf)) {
5972 FREE(s, M_TEMP);
5973 }
5974 if (!error) {
5975 /* stuff the delegation state in the node */
5976 lck_mtx_lock(&np->n_openlock);
5977 np->n_openflags &= ~N_DELEG_MASK;
5978 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5979 np->n_dstateid = dstateid;
5980 np->n_dace = ace;
5981 if (np->n_dlink.tqe_next == NFSNOLIST) {
5982 lck_mtx_lock(&nmp->nm_lock);
5983 if (np->n_dlink.tqe_next == NFSNOLIST) {
5984 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5985 }
5986 lck_mtx_unlock(&nmp->nm_lock);
5987 }
5988 lck_mtx_unlock(&np->n_openlock);
5989 }
5990 break;
5991 default:
5992 error = EBADRPC;
5993 break;
5994 }
5995 }
5996 nfsmout_if(error);
5997 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5998 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
5999 nfsmout_if(error);
6000 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6001 NP(np, "nfs: open reclaim didn't return filehandle?");
6002 error = EBADRPC;
6003 goto nfsmout;
6004 }
6005 if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) {
6006 // XXX what if fh doesn't match the vnode we think we're re-opening?
6007 // That should be pretty hard in this case, given that we are doing
6008 // the open reclaim using the file handle (and not a dir/name pair).
6009 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
6010 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
6011 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
6012 }
6013 }
6014 error = nfs_loadattrcache(np, nvattr, &xid, 1);
6015 nfsmout_if(error);
6016 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
6017 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6018 }
6019nfsmout:
6020 // if (!error)
6021 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
6022 NVATTR_CLEANUP(nvattr);
6023 FREE(nvattr, M_TEMP);
6024 NFS_ZFREE(nfs_fhandle_zone, fh);
6025 nfsm_chain_cleanup(&nmreq);
6026 nfsm_chain_cleanup(&nmrep);
6027 if (!lockerror) {
6028 nfs_node_unlock(np);
6029 }
6030 nfs_open_owner_clear_busy(noop);
6031 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
6032 if (recall) {
6033 nfs4_delegation_return_enqueue(np);
6034 }
6035 }
6036 return error;
6037}
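
/*
 * All of the open paths above stash delegation state with the same shape:
 * test n_dlink.tqe_next == NFSNOLIST, take nm_lock, re-test, then insert.
 * The cheap first test is safe because the per-node open lock is already
 * held at that point.  A userspace sketch of that check/lock/re-check
 * insert (sentinel and types are stand-ins, not the kernel's):
 */
#if 0 /* illustrative sketch, not part of the build */
#include <pthread.h>
#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) link;
};
#define NOLIST ((struct node *)-1)  /* off-list sentinel, like NFSNOLIST */

TAILQ_HEAD(nodelist, node);
static struct nodelist delegations = TAILQ_HEAD_INITIALIZER(delegations);
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
node_init(struct node *np)
{
	np->link.tqe_next = NOLIST;  /* nodes start off-list */
}

/*
 * The unlocked first test is only safe if something keeps the field
 * stable (in the kernel, the caller holds the per-node open lock).
 */
static void
delegation_insert(struct node *np)
{
	if (np->link.tqe_next == NOLIST) {
		pthread_mutex_lock(&list_lock);
		/* re-test under the lock: it may have been inserted meanwhile */
		if (np->link.tqe_next == NOLIST) {
			TAILQ_INSERT_TAIL(&delegations, np, link);
		}
		pthread_mutex_unlock(&list_lock);
	}
}

int
main(void)
{
	struct node n;

	node_init(&n);
	delegation_insert(&n);
	delegation_insert(&n);  /* second call is a no-op: already queued */
	return 0;
}
#endif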
6038
6039int
6040nfs4_open_downgrade_rpc(
6041 nfsnode_t np,
6042 struct nfs_open_file *nofp,
6043 vfs_context_t ctx)
6044{
6045 struct nfs_open_owner *noop = nofp->nof_owner;
6046 struct nfsmount *nmp;
6047 int error, lockerror = ENOENT, status, nfsvers, numops;
6048 struct nfsm_chain nmreq, nmrep;
6049 u_int64_t xid;
6050 struct nfsreq_secinfo_args si;
6051
6052 nmp = NFSTONMP(np);
6053 if (nfs_mount_gone(nmp)) {
6054 return ENXIO;
6055 }
6056 nfsvers = nmp->nm_vers;
6057
6058 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6059 return error;
6060 }
6061
6062 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6063 nfsm_chain_null(&nmreq);
6064 nfsm_chain_null(&nmrep);
6065
6066 // PUTFH, OPEN_DOWNGRADE, GETATTR
6067 numops = 3;
6068 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6069 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
6070 numops--;
6071 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6072 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6073 numops--;
6074 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
6075 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6076 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6077 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
6078 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
6079 numops--;
6080 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6081 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6082 nfsm_chain_build_done(error, &nmreq);
6083 nfsm_assert(error, (numops == 0), EPROTO);
6084 nfsmout_if(error);
6085 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6086 vfs_context_thread(ctx), vfs_context_ucred(ctx),
6087 &si, R_NOINTR, &nmrep, &xid, &status);
6088
6089 if ((lockerror = nfs_node_lock(np))) {
6090 error = lockerror;
6091 }
6092 nfsm_chain_skip_tag(error, &nmrep);
6093 nfsm_chain_get_32(error, &nmrep, numops);
6094 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6095 nfsmout_if(error);
6096 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6097 nfs_owner_seqid_increment(noop, NULL, error);
6098 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6099 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6100 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6101nfsmout:
6102 if (!lockerror) {
6103 nfs_node_unlock(np);
6104 }
6105 nfs_open_owner_clear_busy(noop);
6106 nfsm_chain_cleanup(&nmreq);
6107 nfsm_chain_cleanup(&nmrep);
6108 return error;
6109}
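
/*
 * OPEN_DOWNGRADE sends the access/deny modes that the remaining opens
 * still need (nof_access/nof_deny above); since the NFSv4 share modes
 * are bit masks, that is just a union over the surviving opens.  A
 * sketch of recomputing the downgraded modes from per-mode open counts
 * (the indexed count layout is hypothetical; the real nofp keeps
 * individually named counters):
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stdio.h>
#include <stdint.h>

/* NFSv4 share modes (RFC 7530): READ=1, WRITE=2, BOTH=3; deny NONE=0 */
#define SHARE_ACCESS_READ 1
#define SHARE_DENY_NONE   0

struct open_counts {
	uint32_t cnt[4][4];  /* cnt[access][deny], access 1..3, deny 0..3 */
};

static void
downgraded_modes(const struct open_counts *oc, uint32_t *access, uint32_t *deny)
{
	*access = *deny = 0;
	for (int a = 1; a <= 3; a++) {
		for (int d = 0; d <= 3; d++) {
			if (oc->cnt[a][d]) {
				*access |= (uint32_t)a; /* union over surviving opens */
				*deny |= (uint32_t)d;
			}
		}
	}
}

int
main(void)
{
	struct open_counts oc = { { { 0 } } };
	uint32_t a, d;

	oc.cnt[SHARE_ACCESS_READ][SHARE_DENY_NONE] = 2; /* two read opens left */
	downgraded_modes(&oc, &a, &d);
	printf("downgrade to access=%u deny=%u\n", a, d); /* access=1 deny=0 */
	return 0;
}
#endif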
6110
6111int
6112nfs4_close_rpc(
6113 nfsnode_t np,
6114 struct nfs_open_file *nofp,
6115 thread_t thd,
6116 kauth_cred_t cred,
6117 int flags)
6118{
6119 struct nfs_open_owner *noop = nofp->nof_owner;
6120 struct nfsmount *nmp;
6121 int error, lockerror = ENOENT, status, nfsvers, numops;
6122 struct nfsm_chain nmreq, nmrep;
6123 u_int64_t xid;
6124 struct nfsreq_secinfo_args si;
6125
6126 nmp = NFSTONMP(np);
6127 if (nfs_mount_gone(nmp)) {
6128 return ENXIO;
6129 }
6130 nfsvers = nmp->nm_vers;
6131
6132 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6133 return error;
6134 }
6135
6136 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6137 nfsm_chain_null(&nmreq);
6138 nfsm_chain_null(&nmrep);
6139
6140 // PUTFH, CLOSE, GETATTR
6141 numops = 3;
6142 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6143 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
6144 numops--;
6145 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6146 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6147 numops--;
6148 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
6149 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6150 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6151 numops--;
6152 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6153 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6154 nfsm_chain_build_done(error, &nmreq);
6155 nfsm_assert(error, (numops == 0), EPROTO);
6156 nfsmout_if(error);
6157 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6158
6159 if ((lockerror = nfs_node_lock(np))) {
6160 error = lockerror;
6161 }
6162 nfsm_chain_skip_tag(error, &nmrep);
6163 nfsm_chain_get_32(error, &nmrep, numops);
6164 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6165 nfsmout_if(error);
6166 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
6167 nfs_owner_seqid_increment(noop, NULL, error);
6168 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6169 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6170 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6171nfsmout:
6172 if (!lockerror) {
6173 nfs_node_unlock(np);
6174 }
6175 nfs_open_owner_clear_busy(noop);
6176 nfsm_chain_cleanup(&nmreq);
6177 nfsm_chain_cleanup(&nmrep);
6178 return error;
6179}
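
/*
 * Every seqid-bearing result above passes through
 * nfs_owner_seqid_increment().  Per RFC 7530, an open owner's seqid
 * advances on success and on most errors; only a short list of errors
 * means the server never consumed the seqid.  A sketch of that rule
 * (error list abbreviated; values per RFC 7530):
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stdint.h>

#define NFSERR_STALE_CLIENTID 10022
#define NFSERR_BAD_STATEID    10025
#define NFSERR_BAD_SEQID      10026

/*
 * Advance an open owner's seqid after a seqid-bearing op completes.
 * The seqid advances on success and on most errors; only a short list
 * (abbreviated here) means the server never consumed it.
 */
static void
owner_seqid_increment(uint32_t *seqid, int error)
{
	switch (error) {
	case NFSERR_STALE_CLIENTID:
	case NFSERR_BAD_STATEID:
	case NFSERR_BAD_SEQID:
		return; /* seqid not consumed: do not advance */
	default:
		(*seqid)++;
	}
}

int
main(void)
{
	uint32_t seqid = 1;

	owner_seqid_increment(&seqid, 0);                /* success -> 2 */
	owner_seqid_increment(&seqid, NFSERR_BAD_SEQID); /* unchanged -> 2 */
	return seqid == 2 ? 0 : 1;
}
#endif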
6180
6181
6182/*
6183 * Claim the delegated open combinations this open file holds.
6184 */
6185int
6186nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
6187{
6188 struct nfs_open_owner *noop = nofp->nof_owner;
6189 struct nfs_lock_owner *nlop;
6190 struct nfs_file_lock *nflp, *nextnflp;
6191 struct nfsmount *nmp;
6192 int error = 0, reopen = 0;
6193
6194 if (nofp->nof_d_rw_drw) {
6195 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6196 if (!error) {
6197 lck_mtx_lock(&nofp->nof_lock);
6198 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6199 nofp->nof_d_rw_drw = 0;
6200 lck_mtx_unlock(&nofp->nof_lock);
6201 }
6202 }
6203 if (!error && nofp->nof_d_w_drw) {
6204 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6205 if (!error) {
6206 lck_mtx_lock(&nofp->nof_lock);
6207 nofp->nof_w_drw += nofp->nof_d_w_drw;
6208 nofp->nof_d_w_drw = 0;
6209 lck_mtx_unlock(&nofp->nof_lock);
6210 }
6211 }
6212 if (!error && nofp->nof_d_r_drw) {
6213 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6214 if (!error) {
6215 lck_mtx_lock(&nofp->nof_lock);
6216 nofp->nof_r_drw += nofp->nof_d_r_drw;
6217 nofp->nof_d_r_drw = 0;
6218 lck_mtx_unlock(&nofp->nof_lock);
6219 }
6220 }
6221 if (!error && nofp->nof_d_rw_dw) {
6222 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6223 if (!error) {
6224 lck_mtx_lock(&nofp->nof_lock);
6225 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6226 nofp->nof_d_rw_dw = 0;
6227 lck_mtx_unlock(&nofp->nof_lock);
6228 }
6229 }
6230 if (!error && nofp->nof_d_w_dw) {
6231 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6232 if (!error) {
6233 lck_mtx_lock(&nofp->nof_lock);
6234 nofp->nof_w_dw += nofp->nof_d_w_dw;
6235 nofp->nof_d_w_dw = 0;
6236 lck_mtx_unlock(&nofp->nof_lock);
6237 }
6238 }
6239 if (!error && nofp->nof_d_r_dw) {
6240 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6241 if (!error) {
6242 lck_mtx_lock(&nofp->nof_lock);
6243 nofp->nof_r_dw += nofp->nof_d_r_dw;
6244 nofp->nof_d_r_dw = 0;
6245 lck_mtx_unlock(&nofp->nof_lock);
6246 }
6247 }
6248 /* non-deny-mode opens may be reopened if no locks are held */
6249 if (!error && nofp->nof_d_rw) {
6250 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6251 /* for some errors, we should just try reopening the file */
6252 if (nfs_mount_state_error_delegation_lost(error)) {
6253 reopen = error;
6254 }
6255 if (!error || reopen) {
6256 lck_mtx_lock(&nofp->nof_lock);
6257 nofp->nof_rw += nofp->nof_d_rw;
6258 nofp->nof_d_rw = 0;
6259 lck_mtx_unlock(&nofp->nof_lock);
6260 }
6261 }
6262 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6263 if ((!error || reopen) && nofp->nof_d_w) {
6264 if (!error) {
6265 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6266 /* for some errors, we should just try reopening the file */
6267 if (nfs_mount_state_error_delegation_lost(error)) {
6268 reopen = error;
6269 }
6270 }
6271 if (!error || reopen) {
6272 lck_mtx_lock(&nofp->nof_lock);
6273 nofp->nof_w += nofp->nof_d_w;
6274 nofp->nof_d_w = 0;
6275 lck_mtx_unlock(&nofp->nof_lock);
6276 }
6277 }
6278 if ((!error || reopen) && nofp->nof_d_r) {
6279 if (!error) {
6280 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6281 /* for some errors, we should just try reopening the file */
6282 if (nfs_mount_state_error_delegation_lost(error)) {
6283 reopen = error;
6284 }
6285 }
6286 if (!error || reopen) {
6287 lck_mtx_lock(&nofp->nof_lock);
6288 nofp->nof_r += nofp->nof_d_r;
6289 nofp->nof_d_r = 0;
6290 lck_mtx_unlock(&nofp->nof_lock);
6291 }
6292 }
6293
6294 if (reopen) {
6295 /*
6296 * Any problems with the delegation probably indicate that we
6297 * should review/return all of our current delegation state.
6298 */
6299 if ((nmp = NFSTONMP(nofp->nof_np))) {
6300 nfs4_delegation_return_enqueue(nofp->nof_np);
6301 lck_mtx_lock(&nmp->nm_lock);
6302 nfs_need_recover(nmp, NFSERR_EXPIRED);
6303 lck_mtx_unlock(&nmp->nm_lock);
6304 }
6305 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6306 /* just reopen the file on next access */
6307 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6308 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6309 lck_mtx_lock(&nofp->nof_lock);
6310 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6311 lck_mtx_unlock(&nofp->nof_lock);
6312 return 0;
6313 }
6314 if (reopen) {
6315 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6316 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6317 }
6318 }
6319
6320 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6321 /* claim delegated locks */
6322 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
6323 if (nlop->nlo_open_owner != noop) {
6324 continue;
6325 }
6326 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6327 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6328 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6329 continue;
6330 }
6331 /* skip non-delegated locks */
6332 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6333 continue;
6334 }
6335 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6336 if (error) {
6337 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6338 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6339 break;
6340 }
6341 // else {
6342 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6343 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6344 // }
6345 }
6346 if (error) {
6347 break;
6348 }
6349 }
6350 }
6351
6352 if (!error) { /* all state claimed successfully! */
6353 return 0;
6354 }
6355
6356 /* restart if it looks like a bigger problem than just losing the delegation */
6357 if (!nfs_mount_state_error_delegation_lost(error) &&
6358 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6359 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6360 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6361 nfs_need_reconnect(nmp);
6362 }
6363 return error;
6364 }
6365
6366 /* delegated state lost (once held but now not claimable) */
6367 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6368
6369 /*
6370 * Any problems with the delegation probably indicate that we
6371 * should review/return all of our current delegation state.
6372 */
6373 if ((nmp = NFSTONMP(nofp->nof_np))) {
6374 nfs4_delegation_return_enqueue(nofp->nof_np);
6375 lck_mtx_lock(&nmp->nm_lock);
6376 nfs_need_recover(nmp, NFSERR_EXPIRED);
6377 lck_mtx_unlock(&nmp->nm_lock);
6378 }
6379
6380 /* revoke all open file state */
6381 nfs_revoke_open_state_for_node(nofp->nof_np);
6382
6383 return error;
6384}
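
/*
 * nfs4_claim_delegated_state_for_open_file() above repeats one
 * claim-then-transfer block per (access, deny) combination because each
 * combination lives in an individually named counter pair (nof_d_rw_drw,
 * nof_rw_drw, ...).  With an indexed counter layout the same walk becomes
 * table-driven; a sketch under that hypothetical layout:
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stddef.h>
#include <stdint.h>

#define ACCESS_READ  1  /* NFSv4 share modes: bit masks, BOTH == R|W */
#define ACCESS_WRITE 2
#define ACCESS_BOTH  3
#define DENY_NONE    0
#define DENY_WRITE   2
#define DENY_BOTH    3

struct open_file {
	uint32_t delegated[4][4]; /* [access][deny], like the nof_d_* counts */
	uint32_t confirmed[4][4]; /* like nof_rw_drw, nof_w_dw, ... */
};

static int claim_rpc(struct open_file *, int, int); /* stubbed below */

static int
claim_all(struct open_file *nofp)
{
	/* strongest deny modes first, matching the order in the code above */
	static const int mode[][2] = {
		{ ACCESS_BOTH, DENY_BOTH }, { ACCESS_WRITE, DENY_BOTH },
		{ ACCESS_READ, DENY_BOTH }, { ACCESS_BOTH, DENY_WRITE },
		{ ACCESS_WRITE, DENY_WRITE }, { ACCESS_READ, DENY_WRITE },
		{ ACCESS_BOTH, DENY_NONE }, { ACCESS_WRITE, DENY_NONE },
		{ ACCESS_READ, DENY_NONE },
	};
	int error = 0;

	for (size_t i = 0; !error && i < sizeof(mode) / sizeof(mode[0]); i++) {
		int a = mode[i][0], d = mode[i][1];

		if (!nofp->delegated[a][d]) {
			continue;
		}
		error = claim_rpc(nofp, a, d);
		if (!error) {
			/* move the opens from delegated to server-confirmed */
			nofp->confirmed[a][d] += nofp->delegated[a][d];
			nofp->delegated[a][d] = 0;
		}
	}
	return error;
}

static int
claim_rpc(struct open_file *nofp, int access, int deny)
{
	(void)nofp; (void)access; (void)deny;
	return 0; /* stand-in for nfs4_claim_delegated_open_rpc() */
}

int
main(void)
{
	struct open_file nof = { .delegated = { { 0 } } };

	nof.delegated[ACCESS_BOTH][DENY_NONE] = 1;
	return claim_all(&nof);
}
#endif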
6385#endif /* CONFIG_NFS4*/
6386
6387/*
6388 * Release all open state for the given node.
6389 */
6390void
6391nfs_release_open_state_for_node(nfsnode_t np, int force)
6392{
6393 struct nfsmount *nmp = NFSTONMP(np);
6394 struct nfs_open_file *nofp;
6395 struct nfs_file_lock *nflp, *nextnflp;
6396
6397 /* drop held locks */
6398 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6399 /* skip dead & blocked lock requests */
6400 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6401 continue;
6402 }
6403 /* send an unlock if not a delegated lock */
6404 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6405 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
6406 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6407 }
6408 /* kill/remove the lock */
6409 lck_mtx_lock(&np->n_openlock);
6410 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6411 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6412 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6413 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6414 if (nflp->nfl_blockcnt) {
6415 /* wake up anyone blocked on this lock */
6416 wakeup(nflp);
6417 } else {
6418 /* remove nflp from lock list and destroy */
6419 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6420 nfs_file_lock_destroy(nflp);
6421 }
6422 lck_mtx_unlock(&np->n_openlock);
6423 }
6424
6425 lck_mtx_lock(&np->n_openlock);
6426
6427 /* drop all opens */
6428 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6429 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6430 continue;
6431 }
6432 /* mark open state as lost */
6433 lck_mtx_lock(&nofp->nof_lock);
6434 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6435 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
6436
6437 lck_mtx_unlock(&nofp->nof_lock);
6438#if CONFIG_NFS4
6439 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6440 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
6441 }
6442#endif
6443 }
6444
6445 lck_mtx_unlock(&np->n_openlock);
6446}
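
/*
 * The lock teardown above iterates n_locks with TAILQ_FOREACH_SAFE
 * because the loop body may remove (and destroy) the current element;
 * the _SAFE variant snapshots the next pointer before running the body.
 * A self-contained illustration (BSD <sys/queue.h>):
 */
#if 0 /* illustrative sketch, not part of the build */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct lockent {
	int dead;
	TAILQ_ENTRY(lockent) link;
};
TAILQ_HEAD(lockhead, lockent);

static void
reap_dead(struct lockhead *head)
{
	struct lockent *lp, *nextlp;

	/*
	 * _SAFE saves lp->link.tqe_next into nextlp before the body runs,
	 * so removing and freeing lp cannot derail the walk.
	 */
	TAILQ_FOREACH_SAFE(lp, head, link, nextlp) {
		if (lp->dead) {
			TAILQ_REMOVE(head, lp, link);
			free(lp);
		}
	}
}

int
main(void)
{
	struct lockhead head = TAILQ_HEAD_INITIALIZER(head);
	struct lockent *lp;
	int n = 0;

	for (int i = 0; i < 4; i++) {
		if (!(lp = calloc(1, sizeof(*lp)))) {
			return 1;
		}
		lp->dead = (i & 1);
		TAILQ_INSERT_TAIL(&head, lp, link);
	}
	reap_dead(&head);
	TAILQ_FOREACH(lp, &head, link) {
		n++;
	}
	printf("%d survivors\n", n); /* prints 2 */
	return 0;
}
#endif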
6447
6448/*
6449 * State for a node has been lost, drop it, and revoke the node.
6450 * Attempt to return any state if possible in case the server
6451 * might somehow think we hold it.
6452 */
6453void
6454nfs_revoke_open_state_for_node(nfsnode_t np)
6455{
6456 struct nfsmount *nmp;
6457
6458 /* mark node as needing to be revoked */
6459 nfs_node_lock_force(np);
6460 if (np->n_flag & NREVOKE) { /* already revoked? */
6461 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6462 nfs_node_unlock(np);
6463 return;
6464 }
6465 np->n_flag |= NREVOKE;
6466 nfs_node_unlock(np);
6467
6468 nfs_release_open_state_for_node(np, 0);
6469 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6470
6471 /* mark mount as needing a revoke scan and have the socket thread do it. */
6472 if ((nmp = NFSTONMP(np))) {
6473 lck_mtx_lock(&nmp->nm_lock);
6474 nmp->nm_state |= NFSSTA_REVOKE;
6475 nfs_mount_sock_thread_wake(nmp);
6476 lck_mtx_unlock(&nmp->nm_lock);
6477 }
6478}
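
/*
 * nfs_revoke_open_state_for_node() defers the mount-wide scan: it sets
 * NFSSTA_REVOKE under nm_lock and wakes the socket thread, which does
 * the actual revoke work.  A pthread sketch of that flag-under-lock
 * hand-off (all names hypothetical, not the kernel primitives):
 */
#if 0 /* illustrative sketch, not part of the build */
#include <pthread.h>

struct mountstate {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	unsigned flags;
#define MNT_NEED_REVOKE_SCAN 0x1
};

/* requester side: set the flag under the lock, then wake the worker */
static void
request_revoke_scan(struct mountstate *m)
{
	pthread_mutex_lock(&m->lock);
	m->flags |= MNT_NEED_REVOKE_SCAN;
	pthread_cond_signal(&m->wake);
	pthread_mutex_unlock(&m->lock);
}

/* worker side: the shape of the socket thread's event loop */
static void *
sock_thread(void *arg)
{
	struct mountstate *m = arg;

	pthread_mutex_lock(&m->lock);
	for (;;) {
		while (!(m->flags & MNT_NEED_REVOKE_SCAN)) {
			pthread_cond_wait(&m->wake, &m->lock);
		}
		m->flags &= ~MNT_NEED_REVOKE_SCAN;
		pthread_mutex_unlock(&m->lock);
		/* ... walk the mount's nodes and revoke open state ... */
		pthread_mutex_lock(&m->lock);
	}
	return NULL; /* not reached */
}
#endif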
6479
6480#if CONFIG_NFS4
6481/*
6482 * Claim the delegated open combinations held by each of this node's open files.
6483 */
6484int
6485nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6486{
6487 struct nfs_open_file *nofp;
6488 int error = 0;
6489
6490 lck_mtx_lock(&np->n_openlock);
6491
6492 /* walk the open file list looking for opens with delegated state to claim */
6493restart:
6494 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6495 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6496 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6497 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6498 continue;
6499 }
6500 lck_mtx_unlock(&np->n_openlock);
6501 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6502 lck_mtx_lock(&np->n_openlock);
6503 if (error) {
6504 break;
6505 }
6506 goto restart;
6507 }
6508
6509 lck_mtx_unlock(&np->n_openlock);
6510
6511 return error;
6512}
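
/*
 * The claim walk above cannot hold n_openlock across the claim RPCs, and
 * once the lock is dropped its list iterator may be stale, so it restarts
 * from the head after every successful claim; progress is guaranteed
 * because claimed open files stop matching the has-delegated-opens test.
 * A userspace sketch of that restartable scan:
 */
#if 0 /* illustrative sketch, not part of the build */
#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct ofile {
	bool needs_claim;
	TAILQ_ENTRY(ofile) link;
};
TAILQ_HEAD(ohead, ofile);

static int claim(struct ofile *); /* may sleep; stubbed below */

static int
claim_all(struct ohead *head, pthread_mutex_t *lock)
{
	struct ofile *of;
	int error = 0;

	pthread_mutex_lock(lock);
restart:
	TAILQ_FOREACH(of, head, link) {
		if (!of->needs_claim) {
			continue;
		}
		/* can't hold the list lock across a blocking call ... */
		pthread_mutex_unlock(lock);
		error = claim(of);
		pthread_mutex_lock(lock);
		if (error) {
			break;
		}
		/*
		 * ... and once it was dropped the iterator may be stale, so
		 * restart from the head.  Progress is guaranteed: claimed
		 * entries no longer pass the needs_claim test.
		 */
		goto restart;
	}
	pthread_mutex_unlock(lock);
	return error;
}

static int
claim(struct ofile *of)
{
	of->needs_claim = false; /* stand-in for the claim RPCs */
	return 0;
}
#endif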
6513
6514/*
6515 * Mark a node as needing to have its delegation returned.
6516 * Queue it up on the delegation return queue.
6517 * Make sure the thread is running.
6518 */
6519void
6520nfs4_delegation_return_enqueue(nfsnode_t np)
6521{
6522 struct nfsmount *nmp;
6523
6524 nmp = NFSTONMP(np);
6525 if (nfs_mount_gone(nmp)) {
6526 return;
6527 }
6528
6529 lck_mtx_lock(&np->n_openlock);
6530 np->n_openflags |= N_DELEG_RETURN;
6531 lck_mtx_unlock(&np->n_openlock);
6532
6533 lck_mtx_lock(&nmp->nm_lock);
6534 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6535 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
6536 }
6537 nfs_mount_sock_thread_wake(nmp);
6538 lck_mtx_unlock(&nmp->nm_lock);
6539}
6540
6541/*
6542 * Return any delegation we may hold for the given node.
6543 */
6544int
6545nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6546{
6547 struct nfsmount *nmp;
6548 fhandle_t *fh;
6549 nfs_stateid dstateid;
6550 int error;
6551
6552 nmp = NFSTONMP(np);
6553 if (nfs_mount_gone(nmp)) {
6554 return ENXIO;
6555 }
6556
6557 fh = zalloc(nfs_fhandle_zone);
6558
6559 /* first, make sure the node's marked for delegation return */
6560 lck_mtx_lock(&np->n_openlock);
6561 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6562 lck_mtx_unlock(&np->n_openlock);
6563
6564 /* make sure nobody else is using the delegation state */
6565 if ((error = nfs_open_state_set_busy(np, NULL))) {
6566 goto out;
6567 }
6568
6569 /* claim any delegated state */
6570 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6571 goto out;
6572 }
6573
6574 /* return the delegation */
6575 lck_mtx_lock(&np->n_openlock);
6576 dstateid = np->n_dstateid;
6577 fh->fh_len = np->n_fhsize;
6578 bcopy(np->n_fhp, fh->fh_data, fh->fh_len);
6579 lck_mtx_unlock(&np->n_openlock);
6580 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred);
6581 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6582 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6583 lck_mtx_lock(&np->n_openlock);
6584 np->n_openflags &= ~N_DELEG_MASK;
6585 lck_mtx_lock(&nmp->nm_lock);
6586 if (np->n_dlink.tqe_next != NFSNOLIST) {
6587 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6588 np->n_dlink.tqe_next = NFSNOLIST;
6589 }
6590 lck_mtx_unlock(&nmp->nm_lock);
6591 lck_mtx_unlock(&np->n_openlock);
6592 }
6593
6594out:
6595 /* make sure it's no longer on the return queue and clear the return flags */
6596 lck_mtx_lock(&nmp->nm_lock);
6597 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6598 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6599 np->n_dreturn.tqe_next = NFSNOLIST;
6600 }
6601 lck_mtx_unlock(&nmp->nm_lock);
6602 lck_mtx_lock(&np->n_openlock);
6603 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6604 lck_mtx_unlock(&np->n_openlock);
6605
6606 if (error) {
6607 NP(np, "nfs4_delegation_return, error %d", error);
6608 if (error == ETIMEDOUT) {
6609 nfs_need_reconnect(nmp);
6610 }
6611 if (nfs_mount_state_error_should_restart(error)) {
6612 /* make sure recovery happens */
6613 lck_mtx_lock(&nmp->nm_lock);
6614 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6615 lck_mtx_unlock(&nmp->nm_lock);
6616 }
6617 }
6618
6619 nfs_open_state_clear_busy(np);
6620 NFS_ZFREE(nfs_fhandle_zone, fh);
6621 return error;
6622}
6623
6624/*
6625 * RPC to return a delegation for a file handle
6626 */
6627int
6628nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6629{
6630 int error = 0, status, numops;
6631 uint64_t xid;
6632 struct nfsm_chain nmreq, nmrep;
6633 struct nfsreq_secinfo_args si;
6634
6635 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6636 nfsm_chain_null(&nmreq);
6637 nfsm_chain_null(&nmrep);
6638
6639 // PUTFH, DELEGRETURN
6640 numops = 2;
6641 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6642 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6643 numops--;
6644 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6645 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6646 numops--;
6647 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6648 nfsm_chain_add_stateid(error, &nmreq, sid);
6649 nfsm_chain_build_done(error, &nmreq);
6650 nfsm_assert(error, (numops == 0), EPROTO);
6651 nfsmout_if(error);
6652 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6653 nfsm_chain_skip_tag(error, &nmrep);
6654 nfsm_chain_get_32(error, &nmrep, numops);
6655 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6656 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6657nfsmout:
6658 nfsm_chain_cleanup(&nmreq);
6659 nfsm_chain_cleanup(&nmrep);
6660 return error;
6661}
6662#endif /* CONFIG_NFS4 */
6663
6664/*
6665 * NFS read call.
6666 * Just call nfs_bioread() to do the work.
6667 *
6668 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6669 * without first calling VNOP_OPEN, so we make sure the file is open here.
6670 */
6671int
6672nfs_vnop_read(
6673 struct vnop_read_args /* {
6674 * struct vnodeop_desc *a_desc;
6675 * vnode_t a_vp;
6676 * struct uio *a_uio;
6677 * int a_ioflag;
6678 * vfs_context_t a_context;
6679 * } */*ap)
6680{
6681 vnode_t vp = ap->a_vp;
6682 vfs_context_t ctx = ap->a_context;
6683 nfsnode_t np;
6684 struct nfsmount *nmp;
6685 struct nfs_open_owner *noop;
6686 struct nfs_open_file *nofp;
6687 int error;
6688
6689 if (vnode_vtype(ap->a_vp) != VREG) {
6690 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6691 }
6692
6693 np = VTONFS(vp);
6694 nmp = NFSTONMP(np);
6695 if (nfs_mount_gone(nmp)) {
6696 return ENXIO;
6697 }
6698 if (np->n_flag & NREVOKE) {
6699 return EIO;
6700 }
6701
6702 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6703 if (!noop) {
6704 return ENOMEM;
6705 }
6706restart:
6707 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6708 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6709 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6710 error = EIO;
6711 }
6712#if CONFIG_NFS4
6713 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6714 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6715 nofp = NULL;
6716 if (!error) {
6717 goto restart;
6718 }
6719 }
6720#endif
6721 if (error) {
6722 nfs_open_owner_rele(noop);
6723 return error;
6724 }
6725 /*
6726 * Since the read path is a hot path, if we already have
6727 * read access, let's go ahead and try the read without
6728 * busying the mount and open file node for this open owner.
6729 *
6730 * N.B. This is inherently racy w.r.t. an execve using
6731 * an already open file, in that the read at the end of
6732 * this routine will be racing with a potential close.
6733 * The code below ultimately has the same problem. In practice
6734 * this does not seem to be an issue.
6735 */
6736 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6737 nfs_open_owner_rele(noop);
6738 goto do_read;
6739 }
6740 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6741 if (error) {
6742 nfs_open_owner_rele(noop);
6743 return error;
6744 }
6745 /*
6746 * If we don't already have a file open with the access we need (read),
6747 * then we need to open one; otherwise we just co-opt the existing open.
6748 * We might not already have access because we're trying to read the
6749 * first page of the file for execve.
6750 */
6751 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6752 if (error) {
6753 nfs_mount_state_in_use_end(nmp, 0);
6754 nfs_open_owner_rele(noop);
6755 return error;
6756 }
6757 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6758 /* we don't have the file open, so open it for read access if we're not denied */
6759 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6760 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6761 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6762 }
6763 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6764 nfs_open_file_clear_busy(nofp);
6765 nfs_mount_state_in_use_end(nmp, 0);
6766 nfs_open_owner_rele(noop);
6767 return EPERM;
6768 }
6769 if (np->n_flag & NREVOKE) {
6770 error = EIO;
6771 nfs_open_file_clear_busy(nofp);
6772 nfs_mount_state_in_use_end(nmp, 0);
6773 nfs_open_owner_rele(noop);
6774 return error;
6775 }
6776 if (nmp->nm_vers < NFS_VER4) {
6777 /* NFS v2/v3 opens are always allowed - so just add it. */
6778 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6779 }
6780#if CONFIG_NFS4
6781 else {
6782 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6783 }
6784#endif
6785 if (!error) {
6786 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6787 }
6788 }
6789 if (nofp) {
6790 nfs_open_file_clear_busy(nofp);
6791 }
6792 if (nfs_mount_state_in_use_end(nmp, error)) {
6793 nofp = NULL;
6794 goto restart;
6795 }
6796 nfs_open_owner_rele(noop);
6797 if (error) {
6798 return error;
6799 }
6800do_read:
6801 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6802}
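
/*
 * nfs_vnop_read() above takes an unsynchronized fast path when the open
 * file already has read access, and only pays for the mount/open-file
 * busy protocol on a miss, accepting the close race its comment calls
 * out.  A sketch of that fast/slow split (names and types hypothetical):
 */
#if 0 /* illustrative sketch, not part of the build */
#define ACCESS_READ 0x1

struct ofile {
	unsigned access; /* share modes this open file already holds */
};

static int open_for_read_slow(struct ofile *); /* stubbed below */
static int do_read(struct ofile *);

static int
read_op(struct ofile *of)
{
	/*
	 * Hot path: an unsynchronized peek at the access bits.  Racy
	 * against a concurrent close, exactly as the comment in
	 * nfs_vnop_read() concedes, but the miss only costs an error
	 * from the read, never corruption.
	 */
	if (of->access & ACCESS_READ) {
		return do_read(of);
	}
	/* miss: pay for the busy protocol and open for read access */
	int error = open_for_read_slow(of);
	return error ? error : do_read(of);
}

static int
open_for_read_slow(struct ofile *of)
{
	of->access |= ACCESS_READ; /* stand-in for busying + nfs4_open() */
	return 0;
}

static int
do_read(struct ofile *of)
{
	(void)of;
	return 0; /* stand-in for nfs_bioread() */
}
#endif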
6803
6804#if CONFIG_NFS4
6805/*
6806 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6807 * Regular files are created using the NFSv4 OPEN RPC. So we must open the
6808 * file to create it and then close it.
6809 */
6810int
6811nfs4_vnop_create(
6812 struct vnop_create_args /* {
6813 * struct vnodeop_desc *a_desc;
6814 * vnode_t a_dvp;
6815 * vnode_t *a_vpp;
6816 * struct componentname *a_cnp;
6817 * struct vnode_attr *a_vap;
6818 * vfs_context_t a_context;
6819 * } */*ap)
6820{
6821 vfs_context_t ctx = ap->a_context;
6822 struct componentname *cnp = ap->a_cnp;
6823 struct vnode_attr *vap = ap->a_vap;
6824 vnode_t dvp = ap->a_dvp;
6825 vnode_t *vpp = ap->a_vpp;
6826 struct nfsmount *nmp;
6827 nfsnode_t np;
6828 int error = 0, busyerror = 0, accessMode, denyMode;
6829 struct nfs_open_owner *noop = NULL;
6830 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6831
6832 nmp = VTONMP(dvp);
6833 if (nfs_mount_gone(nmp)) {
6834 return ENXIO;
6835 }
6836
6837 if (vap) {
6838 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6839 }
6840
6841 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6842 if (!noop) {
6843 return ENOMEM;
6844 }
6845
6846restart:
6847 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6848 if (error) {
6849 nfs_open_owner_rele(noop);
6850 return error;
6851 }
6852
6853 /* grab a provisional, nodeless open file */
6854 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6855 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6856 printf("nfs_vnop_create: LOST\n");
6857 error = EIO;
6858 }
6859 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6860 /* This shouldn't happen given that this is a new, nodeless nofp */
6861 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6862 nfs_open_file_destroy(newnofp);
6863 newnofp = NULL;
6864 if (!error) {
6865 nfs_mount_state_in_use_end(nmp, 0);
6866 goto restart;
6867 }
6868 }
6869 if (!error) {
6870 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6871 }
6872 if (error) {
6873 if (newnofp) {
6874 nfs_open_file_destroy(newnofp);
6875 }
6876 newnofp = NULL;
6877 goto out;
6878 }
6879
6880 /*
6881 * We're just trying to create the file.
6882 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6883 */
6884 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6885 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6886
6887 /* Do the open/create */
6888 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6889 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6890 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6891 /*
6892 * Hmm... it looks like the request may have been retransmitted because we
6893 * never saw the first response, which had successfully created/opened the
6894 * file; the retransmission was then denied the open because the mode the
6895 * file was created with doesn't allow write access.
6896 *
6897 * We'll try to work around this by temporarily updating the mode and
6898 * retrying the open.
6899 */
6900 struct vnode_attr vattr;
6901
6902 /* first make sure it's there */
6903 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6904 if (!error2 && np) {
6905 nfs_node_unlock(np);
6906 *vpp = NFSTOV(np);
6907 if (vnode_vtype(NFSTOV(np)) == VREG) {
6908 VATTR_INIT(&vattr);
6909 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6910 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6911 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6912 VATTR_INIT(&vattr);
6913 VATTR_SET(&vattr, va_mode, vap->va_mode);
6914 nfs4_setattr_rpc(np, &vattr, ctx);
6915 if (!error2) {
6916 error = 0;
6917 }
6918 }
6919 }
6920 if (error) {
6921 vnode_put(*vpp);
6922 *vpp = NULL;
6923 }
6924 }
6925 }
6926 if (!error && !*vpp) {
6927 printf("nfs4_open_rpc returned without a node?\n");
6928 /* Hmmm... with no node, we have no filehandle and can't close it */
6929 error = EIO;
6930 }
6931 if (error) {
6932 /* need to cleanup our temporary nofp */
6933 nfs_open_file_clear_busy(newnofp);
6934 nfs_open_file_destroy(newnofp);
6935 newnofp = NULL;
6936 goto out;
6937 }
6938 /* After we have a node, add our open file struct to the node */
6939 np = VTONFS(*vpp);
6940 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6941 nofp = newnofp;
6942 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6943 if (error) {
6944 /* This shouldn't happen, because we passed in a new nofp to use. */
6945 printf("nfs_open_file_find_internal failed! %d\n", error);
6946 goto out;
6947 } else if (nofp != newnofp) {
6948 /*
6949 * Hmm... an open file struct already exists.
6950 * Mark the existing one busy and merge our open into it.
6951 * Then destroy the one we created.
6952 * Note: there's no chance of an open conflict because the
6953 * open has already been granted.
6954 */
6955 busyerror = nfs_open_file_set_busy(nofp, NULL);
6956 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6957 nofp->nof_stateid = newnofp->nof_stateid;
6958 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6959 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6960 }
6961 nfs_open_file_clear_busy(newnofp);
6962 nfs_open_file_destroy(newnofp);
6963 }
6964 newnofp = NULL;
6965 /* mark the node as holding a create-initiated open */
6966 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6967 nofp->nof_creator = current_thread();
6968out:
6969 if (nofp && !busyerror) {
6970 nfs_open_file_clear_busy(nofp);
6971 }
6972 if (nfs_mount_state_in_use_end(nmp, error)) {
6973 nofp = newnofp = NULL;
6974 busyerror = 0;
6975 goto restart;
6976 }
6977 if (noop) {
6978 nfs_open_owner_rele(noop);
6979 }
6980 return error;
6981}
6982
6983/*
6984 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6985 */
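/*
 * For reference, the arguments encoded below follow the CREATE4args XDR
 * from RFC 7530, where createtype4 varies with the object type (regular
 * files are created via OPEN instead):
 *
 *	union createtype4 switch (nfs_ftype4 type) {
 *	case NF4LNK:
 *	        linktext4 linkdata;
 *	case NF4BLK:
 *	case NF4CHR:
 *	        specdata4 devdata;
 *	case NF4SOCK:
 *	case NF4FIFO:
 *	case NF4DIR:
 *	        void;
 *	};
 */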
6986int
6987nfs4_create_rpc(
6988 vfs_context_t ctx,
6989 nfsnode_t dnp,
6990 struct componentname *cnp,
6991 struct vnode_attr *vap,
6992 int type,
6993 char *link,
6994 nfsnode_t *npp)
6995{
6996 struct nfsmount *nmp;
6997 struct nfs_vattr *nvattr;
6998 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6999 int nfsvers, namedattrs, numops;
7000 u_int64_t xid = 0, savedxid = 0;
7001 nfsnode_t np = NULL;
7002 vnode_t newvp = NULL;
7003 struct nfsm_chain nmreq, nmrep;
7004 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7005 const char *tag;
7006 nfs_specdata sd;
7007 fhandle_t *fh;
7008 struct nfsreq *req;
7009 struct nfs_dulookup *dul;
7010 struct nfsreq_secinfo_args si;
7011
7012 nmp = NFSTONMP(dnp);
7013 if (nfs_mount_gone(nmp)) {
7014 return ENXIO;
7015 }
7016 nfsvers = nmp->nm_vers;
7017 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7018 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7019 return EINVAL;
7020 }
7021
7022 sd.specdata1 = sd.specdata2 = 0;
7023
7024 switch (type) {
7025 case NFLNK:
7026 tag = "symlink";
7027 break;
7028 case NFBLK:
7029 case NFCHR:
7030 tag = "mknod";
7031 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
7032 return EINVAL;
7033 }
7034 sd.specdata1 = major(vap->va_rdev);
7035 sd.specdata2 = minor(vap->va_rdev);
7036 break;
7037 case NFSOCK:
7038 case NFFIFO:
7039 tag = "mknod";
7040 break;
7041 case NFDIR:
7042 tag = "mkdir";
7043 break;
7044 default:
7045 return EINVAL;
7046 }
7047
7048 fh = zalloc(nfs_fhandle_zone);
7049 req = zalloc(nfs_req_zone);
7050 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7051 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7052 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
7053
7054 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
7055 if (!namedattrs) {
7056 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7057 }
7058
7059 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
7060 NVATTR_INIT(nvattr);
7061 nfsm_chain_null(&nmreq);
7062 nfsm_chain_null(&nmrep);
7063
7064 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
7065 numops = 6;
7066 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
7067 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
7068 numops--;
7069 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7070 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
7071 numops--;
7072 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7073 numops--;
7074 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
7075 nfsm_chain_add_32(error, &nmreq, type);
7076 if (type == NFLNK) {
7077 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
7078 } else if ((type == NFBLK) || (type == NFCHR)) {
7079 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
7080 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
7081 }
7082 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7083 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7084 numops--;
7085 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7086 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7087 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7088 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7089 numops--;
7090 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7091 numops--;
7092 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7093 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7094 nfsm_chain_build_done(error, &nmreq);
7095 nfsm_assert(error, (numops == 0), EPROTO);
7096 nfsmout_if(error);
7097
7098 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
7099 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7100 if (!error) {
7101 if (!namedattrs) {
7102 nfs_dulookup_start(dul, dnp, ctx);
7103 }
7104 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7105 }
7106
7107 if ((lockerror = nfs_node_lock(dnp))) {
7108 error = lockerror;
7109 }
7110 nfsm_chain_skip_tag(error, &nmrep);
7111 nfsm_chain_get_32(error, &nmrep, numops);
7112 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7113 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7114 nfsmout_if(error);
7115 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7116 nfsm_chain_check_change_info(error, &nmrep, dnp);
7117 bmlen = NFS_ATTR_BITMAP_LEN;
7118 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7119 /* At this point if we have no error, the object was created. */
7120 /* if we don't get attributes, then we should lookitup. */
7121 create_error = error;
7122 nfsmout_if(error);
7123 nfs_vattr_set_supported(bitmap, vap);
7124 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7125 nfsmout_if(error);
7126 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
7127 nfsmout_if(error);
7128 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7129 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7130 error = EBADRPC;
7131 goto nfsmout;
7132 }
7133 /* directory attributes: if we don't get them, make sure to invalidate */
7134 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7135 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7136 savedxid = xid;
7137 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
7138 if (error) {
7139 NATTRINVALIDATE(dnp);
7140 }
7141
7142nfsmout:
7143 nfsm_chain_cleanup(&nmreq);
7144 nfsm_chain_cleanup(&nmrep);
7145
7146 if (!lockerror) {
7147 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7148 dnp->n_flag &= ~NNEGNCENTRIES;
7149 cache_purge_negatives(NFSTOV(dnp));
7150 }
7151 dnp->n_flag |= NMODIFIED;
7152 nfs_node_unlock(dnp);
7153 /* nfs_getattr() will check changed and purge caches */
7154 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7155 }
7156
7157 if (!error && fh->fh_len) {
7158 /* create the vnode with the filehandle and attributes */
7159 xid = savedxid;
7160 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np);
7161 if (!error) {
7162 newvp = NFSTOV(np);
7163 }
7164 }
7165
7166 if (!namedattrs) {
7167 nfs_dulookup_finish(dul, dnp, ctx);
7168 }
7169
7170 NVATTR_CLEANUP(nvattr);
7171 NFS_ZFREE(nfs_fhandle_zone, fh);
7172 NFS_ZFREE(nfs_req_zone, req);
7173 FREE(dul, M_TEMP);
7174 FREE(nvattr, M_TEMP);
7175
7176 /*
7177 * Kludge: Map EEXIST => 0, assuming the EEXIST is a reply to a
7178 * retransmitted request, provided we can successfully look up the object.
7179 */
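	/*
	 * Illustrative timeline of the retransmission race being papered over:
	 *
	 *   client                           server
	 *   CREATE "name"  ---------------->  object created, reply lost
	 *   CREATE "name" (retransmit) ---->  NFS4ERR_EXIST
	 *
	 * If the lookup below finds an object of the expected type, the
	 * EEXIST is treated as success.
	 */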
7180 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7181 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7182 if (!error) {
7183 newvp = NFSTOV(np);
7184 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
7185 error = EEXIST;
7186 }
7187 }
7188 }
7189 if (!busyerror) {
7190 nfs_node_clear_busy(dnp);
7191 }
7192 if (error) {
7193 if (newvp) {
7194 nfs_node_unlock(np);
7195 vnode_put(newvp);
7196 }
7197 } else {
7198 nfs_node_unlock(np);
7199 *npp = np;
7200 }
7201 return error;
7202}
7203
7204int
7205nfs4_vnop_mknod(
7206 struct vnop_mknod_args /* {
7207 * struct vnodeop_desc *a_desc;
7208 * vnode_t a_dvp;
7209 * vnode_t *a_vpp;
7210 * struct componentname *a_cnp;
7211 * struct vnode_attr *a_vap;
7212 * vfs_context_t a_context;
7213 * } */*ap)
7214{
7215 nfsnode_t np = NULL;
7216 struct nfsmount *nmp;
7217 int error;
7218
7219 nmp = VTONMP(ap->a_dvp);
7220 if (nfs_mount_gone(nmp)) {
7221 return ENXIO;
7222 }
7223
7224 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7225 return EINVAL;
7226 }
7227 switch (ap->a_vap->va_type) {
7228 case VBLK:
7229 case VCHR:
7230 case VFIFO:
7231 case VSOCK:
7232 break;
7233 default:
7234 return ENOTSUP;
7235 }
7236
7237 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7238 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7239 if (!error) {
7240 *ap->a_vpp = NFSTOV(np);
7241 }
7242 return error;
7243}
7244
7245int
7246nfs4_vnop_mkdir(
7247 struct vnop_mkdir_args /* {
7248 * struct vnodeop_desc *a_desc;
7249 * vnode_t a_dvp;
7250 * vnode_t *a_vpp;
7251 * struct componentname *a_cnp;
7252 * struct vnode_attr *a_vap;
7253 * vfs_context_t a_context;
7254 * } */*ap)
7255{
7256 nfsnode_t np = NULL;
7257 int error;
7258
7259 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7260 NFDIR, NULL, &np);
7261 if (!error) {
7262 *ap->a_vpp = NFSTOV(np);
7263 }
7264 return error;
7265}
7266
7267int
7268nfs4_vnop_symlink(
7269 struct vnop_symlink_args /* {
7270 * struct vnodeop_desc *a_desc;
7271 * vnode_t a_dvp;
7272 * vnode_t *a_vpp;
7273 * struct componentname *a_cnp;
7274 * struct vnode_attr *a_vap;
7275 * char *a_target;
7276 * vfs_context_t a_context;
7277 * } */*ap)
7278{
7279 nfsnode_t np = NULL;
7280 int error;
7281
7282 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7283 NFLNK, ap->a_target, &np);
7284 if (!error) {
7285 *ap->a_vpp = NFSTOV(np);
7286 }
7287 return error;
7288}
7289
7290int
7291nfs4_vnop_link(
7292 struct vnop_link_args /* {
7293 * struct vnodeop_desc *a_desc;
7294 * vnode_t a_vp;
7295 * vnode_t a_tdvp;
7296 * struct componentname *a_cnp;
7297 * vfs_context_t a_context;
7298 * } */*ap)
7299{
7300 vfs_context_t ctx = ap->a_context;
7301 vnode_t vp = ap->a_vp;
7302 vnode_t tdvp = ap->a_tdvp;
7303 struct componentname *cnp = ap->a_cnp;
7304 int error = 0, lockerror = ENOENT, status;
7305 struct nfsmount *nmp;
7306 nfsnode_t np = VTONFS(vp);
7307 nfsnode_t tdnp = VTONFS(tdvp);
7308 int nfsvers, numops;
7309 u_int64_t xid, savedxid;
7310 struct nfsm_chain nmreq, nmrep;
7311 struct nfsreq_secinfo_args si;
7312
7313 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7314 return EXDEV;
7315 }
7316
7317 nmp = VTONMP(vp);
7318 if (nfs_mount_gone(nmp)) {
7319 return ENXIO;
7320 }
7321 nfsvers = nmp->nm_vers;
7322 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7323 return EINVAL;
7324 }
7325 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7326 return EINVAL;
7327 }
7328
7329 /*
7330 * Push all writes to the server, so that the attribute cache
7331 * doesn't get "out of sync" with the server.
7332 * XXX There should be a better way!
7333 */
7334 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7335
7336 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7337 return error;
7338 }
7339
7340 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7341 nfsm_chain_null(&nmreq);
7342 nfsm_chain_null(&nmrep);
7343
7344 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7345 numops = 7;
7346 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
7347 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
7348 numops--;
7349 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7350 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7351 numops--;
7352 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7353 numops--;
7354 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7355 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7356 numops--;
7357 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7358 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7359 numops--;
7360 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7361 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7362 numops--;
7363 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7364 numops--;
7365 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7366 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7367 nfsm_chain_build_done(error, &nmreq);
7368 nfsm_assert(error, (numops == 0), EPROTO);
7369 nfsmout_if(error);
7370 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7371
7372 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7373 error = lockerror;
7374 goto nfsmout;
7375 }
7376 nfsm_chain_skip_tag(error, &nmrep);
7377 nfsm_chain_get_32(error, &nmrep, numops);
7378 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7379 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7380 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7381 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7382 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7383 /* directory attributes: if we don't get them, make sure to invalidate */
7384 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7385 savedxid = xid;
7386 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
7387 if (error) {
7388 NATTRINVALIDATE(tdnp);
7389 }
7390 /* link attributes: if we don't get them, make sure to invalidate */
7391 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7392 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7393 xid = savedxid;
7394 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
7395 if (error) {
7396 NATTRINVALIDATE(np);
7397 }
7398nfsmout:
7399 nfsm_chain_cleanup(&nmreq);
7400 nfsm_chain_cleanup(&nmrep);
7401 if (!lockerror) {
7402 tdnp->n_flag |= NMODIFIED;
7403 }
7404 /* Kludge: Map EEXIST => 0, assuming it is a reply to a retransmitted request. */
7405 if (error == EEXIST) {
7406 error = 0;
7407 }
7408 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7409 tdnp->n_flag &= ~NNEGNCENTRIES;
7410 cache_purge_negatives(tdvp);
7411 }
7412 if (!lockerror) {
7413 nfs_node_unlock2(tdnp, np);
7414 }
7415 nfs_node_clear_busy2(tdnp, np);
7416 return error;
7417}
7418
7419int
7420nfs4_vnop_rmdir(
7421 struct vnop_rmdir_args /* {
7422 * struct vnodeop_desc *a_desc;
7423 * vnode_t a_dvp;
7424 * vnode_t a_vp;
7425 * struct componentname *a_cnp;
7426 * vfs_context_t a_context;
7427 * } */*ap)
7428{
7429 vfs_context_t ctx = ap->a_context;
7430 vnode_t vp = ap->a_vp;
7431 vnode_t dvp = ap->a_dvp;
7432 struct componentname *cnp = ap->a_cnp;
7433 struct nfsmount *nmp;
7434 int error = 0, namedattrs;
7435 nfsnode_t np = VTONFS(vp);
7436 nfsnode_t dnp = VTONFS(dvp);
7437 struct nfs_dulookup *dul;
7438
7439 if (vnode_vtype(vp) != VDIR) {
7440 return EINVAL;
7441 }
7442
7443 nmp = NFSTONMP(dnp);
7444 if (nfs_mount_gone(nmp)) {
7445 return ENXIO;
7446 }
7447 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7448
7449 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7450 return error;
7451 }
7452
7453 MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK);
7454 if (!namedattrs) {
7455 nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7456 nfs_dulookup_start(dul, dnp, ctx);
7457 }
7458
7459 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
7460 vfs_context_thread(ctx), vfs_context_ucred(ctx));
7461
7462 nfs_name_cache_purge(dnp, np, cnp, ctx);
7463 /* nfs_getattr() will check changed and purge caches */
7464 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7465 if (!namedattrs) {
7466 nfs_dulookup_finish(dul, dnp, ctx);
7467 }
7468 nfs_node_clear_busy2(dnp, np);
7469
7470 /*
7471 * Kludge: Map ENOENT => 0, assuming the reply is to a retransmitted request.
7472 */
7473 if (error == ENOENT) {
7474 error = 0;
7475 }
7476 if (!error) {
7477 /*
7478 * remove nfsnode from hash now so we can't accidentally find it
7479 * again if another object gets created with the same filehandle
7480 * before this vnode gets reclaimed
7481 */
7482 lck_mtx_lock(nfs_node_hash_mutex);
7483 if (np->n_hflag & NHHASHED) {
7484 LIST_REMOVE(np, n_hash);
7485 np->n_hflag &= ~NHHASHED;
7486 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7487 }
7488 lck_mtx_unlock(nfs_node_hash_mutex);
7489 }
7490 FREE(dul, M_TEMP);
7491 return error;
7492}
7493
7494/*
7495 * NFSv4 Named Attributes
7496 *
7497 * Both the extended attributes interface and the named streams interface
7498 * are backed by NFSv4 named attributes. The implementations for both use
7499 * a common set of routines in an attempt to reduce code duplication, to
7500 * increase efficiency, to increase caching of both names and data, and to
7501 * confine the complexity.
7502 *
7503 * Each NFS node caches its named attribute directory's file handle.
7504 * The directory nodes for the named attribute directories are handled
7505 * exactly like regular directories (with a couple minor exceptions).
7506 * Named attribute nodes are also treated as much like regular files as
7507 * possible.
7508 *
7509 * Most of the heavy lifting is done by nfs4_named_attr_get().
7510 */
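/*
 * Illustrative mapping (not a literal trace): reading the extended attribute
 * "x" of a file roughly corresponds to the NFSv4 operation sequence
 *
 *	PUTFH(file) -> OPENATTR -> LOOKUP("x") [-> READ]
 *
 * with the attribute directory's filehandle cached in the file's nfsnode
 * after the first OPENATTR, so later accesses can start at the directory.
 */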
7511
7512/*
7513 * Get the given node's attribute directory node.
7514 * If !fetch, then only return a cached node.
7515 * Otherwise, we will attempt to fetch the node from the server.
7516 * (Note: the node should be marked busy.)
7517 */
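/*
 * Callers use both modes (see below): nfs4_named_attr_get() first probes the
 * cache with fetch == 0 and falls back to its own combined RPC, while
 * nfs4_named_attr_remove() and nfs4_vnop_listxattr() pass fetch == 1 to
 * force the OPENATTR when the directory isn't cached yet.
 */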
7518nfsnode_t
7519nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
7520{
7521 nfsnode_t adnp = NULL;
7522 struct nfsmount *nmp;
7523 int error = 0, status, numops;
7524 struct nfsm_chain nmreq, nmrep;
7525 u_int64_t xid;
7526 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
7527 fhandle_t *fh;
7528 struct nfs_vattr *nvattr;
7529 struct componentname cn;
7530 struct nfsreq *req;
7531 struct nfsreq_secinfo_args si;
7532
7533 nmp = NFSTONMP(np);
7534 if (nfs_mount_gone(nmp)) {
7535 return NULL;
7536 }
7537 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7538 return NULL;
7539 }
7540
7541 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7542 fh = zalloc(nfs_fhandle_zone);
7543 req = zalloc(nfs_req_zone);
7544 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7545 NVATTR_INIT(nvattr);
7546 nfsm_chain_null(&nmreq);
7547 nfsm_chain_null(&nmrep);
7548
7549 bzero(&cn, sizeof(cn));
7550 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7551 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
7552 cn.cn_nameiop = LOOKUP;
7553
7554 if (np->n_attrdirfh) {
7555 // XXX can't set parent correctly (to np) yet
7556 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7557 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7558 if (adnp) {
7559 goto nfsmout;
7560 }
7561 }
7562 if (!fetch) {
7563 error = ENOENT;
7564 goto nfsmout;
7565 }
7566
7567 // PUTFH, OPENATTR, GETATTR
7568 numops = 3;
7569 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
7570 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
7571 numops--;
7572 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7573 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7574 numops--;
7575 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7576 nfsm_chain_add_32(error, &nmreq, 0);
7577 numops--;
7578 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7579 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7580 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7581 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7582 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7583 nfsm_chain_build_done(error, &nmreq);
7584 nfsm_assert(error, (numops == 0), EPROTO);
7585 nfsmout_if(error);
7586 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
7587 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7588 if (!error) {
7589 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7590 }
7591
7592 nfsm_chain_skip_tag(error, &nmrep);
7593 nfsm_chain_get_32(error, &nmrep, numops);
7594 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7595 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7596 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7597 nfsmout_if(error);
7598 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
7599 nfsmout_if(error);
7600 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
7601 error = ENOENT;
7602 goto nfsmout;
7603 }
7604 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
7605 /* (re)allocate attrdir fh buffer */
7606 if (np->n_attrdirfh) {
7607 FREE(np->n_attrdirfh, M_TEMP);
7608 }
7609 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
7610 }
7611 if (!np->n_attrdirfh) {
7612 error = ENOMEM;
7613 goto nfsmout;
7614 }
7615 /* cache the attrdir fh in the node */
7616 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
7617 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
7618 /* create node for attrdir */
7619 // XXX can't set parent correctly (to np) yet
7620 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
7621nfsmout:
7622 NVATTR_CLEANUP(nvattr);
7623 NFS_ZFREE(nfs_fhandle_zone, fh);
7624 NFS_ZFREE(nfs_req_zone, req);
7625 FREE(nvattr, M_TEMP);
7626 nfsm_chain_cleanup(&nmreq);
7627 nfsm_chain_cleanup(&nmrep);
7628
7629 if (adnp) {
7630 /* sanity check that this node is an attribute directory */
7631 if (adnp->n_vattr.nva_type != VDIR) {
7632 error = EINVAL;
7633 }
7634 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
7635 error = EINVAL;
7636 }
7637 nfs_node_unlock(adnp);
7638 if (error) {
7639 vnode_put(NFSTOV(adnp));
7640 }
7641 }
7642 return error ? NULL : adnp;
7643}
7644
7645/*
7646 * Get the given node's named attribute node for the name given.
7647 *
7648 * In an effort to increase the performance of named attribute access, we try
7649 * to reduce server requests by doing the following:
7650 *
7651 * - cache the node's named attribute directory file handle in the node
7652 * - maintain a directory vnode for the attribute directory
7653 * - use name cache entries (positive and negative) to speed up lookups
7654 * - optionally open the named attribute (with the given accessMode) in the same RPC
7655 * - combine attribute directory retrieval with the lookup/open RPC
7656 * - optionally prefetch the named attribute's first block of data in the same RPC
7657 *
7658 * Also, in an attempt to reduce the number of copies/variations of this code,
7659 * parts of the RPC building/processing code are conditionalized on what is
7660 * needed for any particular request (openattr, lookup vs. open, read).
7661 *
7662 * Note that because we may not have the attribute directory node when we start
7663 * the lookup/open, we lock both the node and the attribute directory node.
7664 */
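/*
 * Typical read-side call, sketched from nfs4_vnop_getxattr() below:
 *
 *	cn.cn_nameiop = LOOKUP;
 *	cn.cn_flags = MAKEENTRY;
 *	error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
 *	    NFS_GET_NAMED_ATTR_PREFETCH, ctx, &anp, NULL);
 *
 * Writers pass NFS_OPEN_SHARE_ACCESS_BOTH plus the create/truncate flags
 * below and receive the opened file through the final argument.
 */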
7665
7666#define NFS_GET_NAMED_ATTR_CREATE 0x1
7667#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7668#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7669#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
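/*
 * How the xattr entry points below combine these flags (sketch):
 *	getxattr:    PREFETCH (skipped for the resource fork)
 *	setxattr:    CREATE unless XATTR_REPLACE, CREATE_GUARDED for
 *	             XATTR_CREATE, TRUNCATE for all but the resource fork
 *	removexattr: none (plain lookup)
 */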
7670
7671int
7672nfs4_named_attr_get(
7673 nfsnode_t np,
7674 struct componentname *cnp,
7675 uint32_t accessMode,
7676 int flags,
7677 vfs_context_t ctx,
7678 nfsnode_t *anpp,
7679 struct nfs_open_file **nofpp)
7680{
7681 struct nfsmount *nmp;
7682 int error = 0, open_error = EIO;
7683 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7684 int create, guarded, prefetch, truncate, noopbusy = 0;
7685 int open, status, numops, hadattrdir, negnamecache;
7686 struct nfs_vattr *nvattr;
7687 struct vnode_attr vattr;
7688 nfsnode_t adnp = NULL, anp = NULL;
7689 vnode_t avp = NULL;
7690 u_int64_t xid = 0, savedxid = 0;
7691 struct nfsm_chain nmreq, nmrep;
7692 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7693 uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen;
7694 nfs_stateid stateid, dstateid;
7695 fhandle_t *fh;
7696 struct nfs_open_owner *noop = NULL;
7697 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7698 struct vnop_access_args naa;
7699 thread_t thd;
7700 kauth_cred_t cred;
7701 struct timeval now;
7702 char sbuf[64], *s;
7703 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7704 struct kauth_ace ace;
7705 struct nfsreq *req;
7706 struct nfsreq_secinfo_args si;
7707
7708 *anpp = NULL;
7709 rflags = delegation = recall = eof = rlen = retlen = 0;
7710 ace.ace_flags = 0;
7711 s = sbuf;
7712 slen = sizeof(sbuf);
7713
7714 nmp = NFSTONMP(np);
7715 if (nfs_mount_gone(nmp)) {
7716 return ENXIO;
7717 }
7718 fh = zalloc(nfs_fhandle_zone);
7719 req = zalloc(nfs_req_zone);
7720 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
7721 NVATTR_INIT(nvattr);
7722 fh->fh_len = 0;
7723 bzero(&dstateid, sizeof(dstateid));
7724 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7725 thd = vfs_context_thread(ctx);
7726 cred = vfs_context_ucred(ctx);
7727 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7728 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7729 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7730 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7731
7732 if (!create) {
7733 error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
7734 if (error) {
7735 goto out_free;
7736 }
7737 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7738 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7739 error = ENOATTR;
7740 goto out_free;
7741 }
7742 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7743 /* shouldn't happen... but just be safe */
7744 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7745 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7746 }
7747 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7748 if (open) {
7749 /*
7750 * We're trying to open the file.
7751 * We'll create/open it with the given access mode,
7752 * and set NFS_OPEN_FILE_CREATE.
7753 */
7754 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7755 if (prefetch && guarded) {
7756 prefetch = 0; /* no sense prefetching data that can't be there */
7757 }
7758 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7759 if (!noop) {
7760 error = ENOMEM;
7761 goto out_free;
7762 }
7763 }
7764
7765 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7766 goto out_free;
7767 }
7768
7769 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7770 hadattrdir = (adnp != NULL);
7771 if (prefetch) {
7772 microuptime(&now);
7773 /* use the special state ID because we don't have a real one to send */
7774 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7775 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7776 }
7777 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7778 nfsm_chain_null(&nmreq);
7779 nfsm_chain_null(&nmrep);
7780
7781 if (hadattrdir) {
7782 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7783 goto nfsmout;
7784 }
7785 /* nfs_getattr() will check changed and purge caches */
7786 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7787 nfsmout_if(error);
7788 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7789 switch (error) {
7790 case ENOENT:
7791 /* negative cache entry */
7792 goto nfsmout;
7793 case 0:
7794 /* cache miss */
7795 /* try dir buf cache lookup */
7796 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL);
7797 if (!error && anp) {
7798 /* dir buf cache hit */
7799 *anpp = anp;
7800 error = -1;
7801 }
7802 if (error != -1) { /* cache miss */
7803 break;
7804 }
7805 OS_FALLTHROUGH;
7806 case -1:
7807 /* cache hit, not really an error */
7808 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7809 if (!anp && avp) {
7810 *anpp = anp = VTONFS(avp);
7811 }
7812
7813 nfs_node_clear_busy(adnp);
7814 adbusyerror = ENOENT;
7815
7816 /* check for directory access */
7817 naa.a_desc = &vnop_access_desc;
7818 naa.a_vp = NFSTOV(adnp);
7819 naa.a_action = KAUTH_VNODE_SEARCH;
7820 naa.a_context = ctx;
7821
7822 /* compute actual success/failure based on accessibility */
7823 error = nfs_vnop_access(&naa);
7824 OS_FALLTHROUGH;
7825 default:
7826 /* we either found it, or hit an error */
7827 if (!error && guarded) {
7828 /* found cached entry but told not to use it */
7829 error = EEXIST;
7830 vnode_put(NFSTOV(anp));
7831 *anpp = anp = NULL;
7832 }
7833 /* we're done if error or we don't need to open */
7834 if (error || !open) {
7835 goto nfsmout;
7836 }
7837 /* no error and we need to open... */
7838 }
7839 }
7840
7841 if (open) {
7842restart:
7843 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7844 if (error) {
7845 nfs_open_owner_rele(noop);
7846 noop = NULL;
7847 goto nfsmout;
7848 }
7849 inuse = 1;
7850
7851 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7852 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7853 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7854 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7855 error = EIO;
7856 }
7857 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7858 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7859 nfs_open_file_destroy(newnofp);
7860 newnofp = NULL;
7861 if (!error) {
7862 nfs_mount_state_in_use_end(nmp, 0);
7863 inuse = 0;
7864 goto restart;
7865 }
7866 }
7867 if (!error) {
7868 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7869 }
7870 if (error) {
7871 if (newnofp) {
7872 nfs_open_file_destroy(newnofp);
7873 }
7874 newnofp = NULL;
7875 goto nfsmout;
7876 }
7877 if (anp) {
7878 /*
7879 * We already have the node. So we just need to open
7880 * it - which we may be able to do with a delegation.
7881 */
7882 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7883 if (!error) {
7884 /* open succeeded, so our open file is no longer temporary */
7885 nofp = newnofp;
7886 nofpbusyerror = 0;
7887 newnofp = NULL;
7888 if (nofpp) {
7889 *nofpp = nofp;
7890 }
7891 }
7892 goto nfsmout;
7893 }
7894 }
7895
7896 /*
7897 * We either don't have the attrdir or we didn't find the attribute
7898 * in the name cache, so we need to talk to the server.
7899 *
7900 * If we don't have the attrdir, we'll need to ask the server for that too.
7901 * If the caller is requesting that the attribute be created, we need to
7902 * make sure the attrdir is created.
7903 * The caller may also request that the first block of an existing attribute
7904 * be retrieved at the same time.
7905 */
7906
7907 if (open) {
7908 /* need to mark the open owner busy during the RPC */
7909 if ((error = nfs_open_owner_set_busy(noop, thd))) {
7910 goto nfsmout;
7911 }
7912 noopbusy = 1;
7913 }
7914
7915 /*
7916 * We'd like to get updated post-open/lookup attributes for the
7917 * directory and we may also want to prefetch some data via READ.
7918 * We'd like the READ results to be last so that we can leave the
7919 * data in the mbufs until the end.
7920 *
7921 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7922 */
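	/*
	 * Worst case (no cached attrdir, open with prefetch), the COMPOUND
	 * assembled below is roughly:
	 *
	 *   PUTFH(np) OPENATTR GETATTR(FH)     - locate/create attr directory
	 *   OPEN|LOOKUP(name) GETATTR(FH)      - open or look up the attribute
	 *   SAVEFH PUTFH OPENATTR GETATTR      - refresh directory attributes
	 *   RESTOREFH NVERIFY(size == 0) READ  - prefetch only if non-empty
	 */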
7923 numops = 5;
7924 if (!hadattrdir) {
7925 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7926 }
7927 if (prefetch) {
7928 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7929 }
7930 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7931 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7932 if (hadattrdir) {
7933 numops--;
7934 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7935 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7936 } else {
7937 numops--;
7938 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7939 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7940 numops--;
7941 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7942 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7943 numops--;
7944 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7945 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7946 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7947 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7948 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7949 }
7950 if (open) {
7951 numops--;
7952 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7953 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7954 nfsm_chain_add_32(error, &nmreq, accessMode);
7955 nfsm_chain_add_32(error, &nmreq, denyMode);
7956 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7957 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7958 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7959 nfsm_chain_add_32(error, &nmreq, create);
7960 if (create) {
7961 nfsm_chain_add_32(error, &nmreq, guarded);
7962 VATTR_INIT(&vattr);
7963 if (truncate) {
7964 VATTR_SET(&vattr, va_data_size, 0);
7965 }
7966 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7967 }
7968 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7969 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7970 } else {
7971 numops--;
7972 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7973 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7974 }
7975 numops--;
7976 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7977 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7978 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7979 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7980 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7981 if (prefetch) {
7982 numops--;
7983 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7984 }
7985 if (hadattrdir) {
7986 numops--;
7987 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7988 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7989 } else {
7990 numops--;
7991 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7992 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7993 numops--;
7994 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7995 nfsm_chain_add_32(error, &nmreq, 0);
7996 }
7997 numops--;
7998 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7999 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
8000 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
8001 if (prefetch) {
8002 numops--;
8003 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
8004 numops--;
8005 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
8006 VATTR_INIT(&vattr);
8007 VATTR_SET(&vattr, va_data_size, 0);
8008 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
8009 numops--;
8010 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
8011 nfsm_chain_add_stateid(error, &nmreq, &stateid);
8012 nfsm_chain_add_64(error, &nmreq, 0);
8013 nfsm_chain_add_32(error, &nmreq, rlen);
8014 }
8015 nfsm_chain_build_done(error, &nmreq);
8016 nfsm_assert(error, (numops == 0), EPROTO);
8017 nfsmout_if(error);
8018 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
8019 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
8020 if (!error) {
8021 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
8022 }
8023
8024 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
8025 error = adlockerror;
8026 }
8027 savedxid = xid;
8028 nfsm_chain_skip_tag(error, &nmrep);
8029 nfsm_chain_get_32(error, &nmrep, numops);
8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8031 if (!hadattrdir) {
8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8033 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8034 nfsmout_if(error);
8035 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8036 nfsmout_if(error);
8037 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) {
8038 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) {
8039 /* (re)allocate attrdir fh buffer */
8040 if (np->n_attrdirfh) {
8041 FREE(np->n_attrdirfh, M_TEMP);
8042 }
8043 MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK);
8044 }
8045 if (np->n_attrdirfh) {
8046 /* remember the attrdir fh in the node */
8047 *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */
8048 bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len);
8049 /* create busied node for attrdir */
8050 struct componentname cn;
8051 bzero(&cn, sizeof(cn));
8052 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
8053 cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER);
8054 cn.cn_nameiop = LOOKUP;
8055 // XXX can't set parent correctly (to np) yet
8056 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp);
8057 if (!error) {
8058 adlockerror = 0;
8059 /* set the node busy */
8060 SET(adnp->n_flag, NBUSY);
8061 adbusyerror = 0;
8062 }
8063 /* if no adnp, oh well... */
8064 error = 0;
8065 }
8066 }
8067 NVATTR_CLEANUP(nvattr);
8068 fh->fh_len = 0;
8069 }
8070 if (open) {
8071 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
8072 nfs_owner_seqid_increment(noop, NULL, error);
8073 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
8074 nfsm_chain_check_change_info(error, &nmrep, adnp);
8075 nfsm_chain_get_32(error, &nmrep, rflags);
8076 bmlen = NFS_ATTR_BITMAP_LEN;
8077 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
8078 nfsm_chain_get_32(error, &nmrep, delegation);
8079 if (!error) {
8080 switch (delegation) {
8081 case NFS_OPEN_DELEGATE_NONE:
8082 break;
8083 case NFS_OPEN_DELEGATE_READ:
8084 case NFS_OPEN_DELEGATE_WRITE:
8085 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
8086 nfsm_chain_get_32(error, &nmrep, recall);
8087 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the write delegation's space limit XXX
8088 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
8089 }
8090 /* if we have any trouble accepting the ACE, just invalidate it */
8091 ace_type = ace_flags = ace_mask = len = 0;
8092 nfsm_chain_get_32(error, &nmrep, ace_type);
8093 nfsm_chain_get_32(error, &nmrep, ace_flags);
8094 nfsm_chain_get_32(error, &nmrep, ace_mask);
8095 nfsm_chain_get_32(error, &nmrep, len);
8096 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
8097 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
8098 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
8099 if (!error && (len >= slen)) {
8100 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
8101 if (s) {
8102 slen = len + 1;
8103 } else {
8104 ace.ace_flags = 0;
8105 }
8106 }
8107 if (s) {
8108 nfsm_chain_get_opaque(error, &nmrep, len, s);
8109 } else {
8110 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
8111 }
8112 if (!error && s) {
8113 s[len] = '\0';
8114 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
8115 ace.ace_flags = 0;
8116 }
8117 }
8118 if (error || !s) {
8119 ace.ace_flags = 0;
8120 }
8121 if (s && (s != sbuf)) {
8122 FREE(s, M_TEMP);
8123 }
8124 break;
8125 default:
8126 error = EBADRPC;
8127 break;
8128 }
8129 }
8130 /* At this point if we have no error, the object was created/opened. */
8131 open_error = error;
8132 } else {
8133 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8134 }
8135 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8136 nfsmout_if(error);
8137 error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL);
8138 nfsmout_if(error);
8139 if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) {
8140 error = EIO;
8141 goto nfsmout;
8142 }
8143 if (prefetch) {
8144 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
8145 }
8146 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8147 if (!hadattrdir) {
8148 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8149 }
8150 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8151 nfsmout_if(error);
8152 xid = savedxid;
8153 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8154 nfsmout_if(error);
8155
8156 if (open) {
8157 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8158 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8159 }
8160 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8161 if (adnp) {
8162 nfs_node_unlock(adnp);
8163 adlockerror = ENOENT;
8164 }
8165 NVATTR_CLEANUP(nvattr);
8166 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid);
8167 nfsmout_if(error);
8168 savedxid = xid;
8169 if ((adlockerror = nfs_node_lock(adnp))) {
8170 error = adlockerror;
8171 }
8172 }
8173 }
8174
8175nfsmout:
8176 if (open && adnp && !adlockerror) {
8177 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8178 adnp->n_flag &= ~NNEGNCENTRIES;
8179 cache_purge_negatives(NFSTOV(adnp));
8180 }
8181 adnp->n_flag |= NMODIFIED;
8182 nfs_node_unlock(adnp);
8183 adlockerror = ENOENT;
8184 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8185 }
8186 if (adnp && !adlockerror && (error == ENOENT) &&
8187 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8188 /* add a negative entry in the name cache */
8189 cache_enter(NFSTOV(adnp), NULL, cnp);
8190 adnp->n_flag |= NNEGNCENTRIES;
8191 }
8192 if (adnp && !adlockerror) {
8193 nfs_node_unlock(adnp);
8194 adlockerror = ENOENT;
8195 }
8196 if (!error && !anp && fh->fh_len) {
8197 /* create the vnode with the filehandle and attributes */
8198 xid = savedxid;
8199 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp);
8200 if (!error) {
8201 *anpp = anp;
8202 nfs_node_unlock(anp);
8203 }
8204 if (!error && open) {
8205 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8206 /* After we have a node, add our open file struct to the node */
8207 nofp = newnofp;
8208 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8209 if (error) {
8210 /* This shouldn't happen, because we passed in a new nofp to use. */
8211 printf("nfs_open_file_find_internal failed! %d\n", error);
8212 nofp = NULL;
8213 } else if (nofp != newnofp) {
8214 /*
8215 * Hmm... an open file struct already exists.
8216 * Mark the existing one busy and merge our open into it.
8217 * Then destroy the one we created.
8218 * Note: there's no chance of an open conflict because the
8219 * open has already been granted.
8220 */
8221 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8222 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8223 nofp->nof_stateid = newnofp->nof_stateid;
8224 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8225 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8226 }
8227 nfs_open_file_clear_busy(newnofp);
8228 nfs_open_file_destroy(newnofp);
8229 newnofp = NULL;
8230 }
8231 if (!error) {
8232 newnofp = NULL;
8233 nofpbusyerror = 0;
8234 /* mark the node as holding a create-initiated open */
8235 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8236 nofp->nof_creator = current_thread();
8237 if (nofpp) {
8238 *nofpp = nofp;
8239 }
8240 }
8241 }
8242 }
8243 NVATTR_CLEANUP(nvattr);
8244 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8245 if (!error && anp && !recall) {
8246 /* stuff the delegation state in the node */
8247 lck_mtx_lock(&anp->n_openlock);
8248 anp->n_openflags &= ~N_DELEG_MASK;
8249 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8250 anp->n_dstateid = dstateid;
8251 anp->n_dace = ace;
8252 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8253 lck_mtx_lock(&nmp->nm_lock);
8254 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8255 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8256 }
8257 lck_mtx_unlock(&nmp->nm_lock);
8258 }
8259 lck_mtx_unlock(&anp->n_openlock);
8260 } else {
8261 /* give the delegation back */
8262 if (anp) {
8263 if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) {
8264 /* update delegation state and return it */
8265 lck_mtx_lock(&anp->n_openlock);
8266 anp->n_openflags &= ~N_DELEG_MASK;
8267 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8268 anp->n_dstateid = dstateid;
8269 anp->n_dace = ace;
8270 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8271 lck_mtx_lock(&nmp->nm_lock);
8272 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8273 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8274 }
8275 lck_mtx_unlock(&nmp->nm_lock);
8276 }
8277 lck_mtx_unlock(&anp->n_openlock);
8278 /* don't need to send a separate delegreturn for fh */
8279 fh->fh_len = 0;
8280 }
8281 /* return anp's current delegation */
8282 nfs4_delegation_return(anp, 0, thd, cred);
8283 }
8284 if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */
8285 nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred);
8286 }
8287 }
8288 }
8289 if (open) {
8290 if (newnofp) {
8291 /* need to cleanup our temporary nofp */
8292 nfs_open_file_clear_busy(newnofp);
8293 nfs_open_file_destroy(newnofp);
8294 newnofp = NULL;
8295 } else if (nofp && !nofpbusyerror) {
8296 nfs_open_file_clear_busy(nofp);
8297 nofpbusyerror = ENOENT;
8298 }
8299 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8300 inuse = 0;
8301 nofp = newnofp = NULL;
8302 rflags = delegation = recall = eof = rlen = retlen = 0;
8303 ace.ace_flags = 0;
8304 s = sbuf;
8305 slen = sizeof(sbuf);
8306 nfsm_chain_cleanup(&nmreq);
8307 nfsm_chain_cleanup(&nmrep);
8308 if (anp) {
8309 vnode_put(NFSTOV(anp));
8310 *anpp = anp = NULL;
8311 }
8312 hadattrdir = (adnp != NULL);
8313 if (noopbusy) {
8314 nfs_open_owner_clear_busy(noop);
8315 noopbusy = 0;
8316 }
8317 goto restart;
8318 }
8319 inuse = 0;
8320 if (noop) {
8321 if (noopbusy) {
8322 nfs_open_owner_clear_busy(noop);
8323 noopbusy = 0;
8324 }
8325 nfs_open_owner_rele(noop);
8326 }
8327 }
8328 if (!error && prefetch && nmrep.nmc_mhead) {
8329 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8330 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8331 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8332 nfsm_chain_get_32(error, &nmrep, eof);
8333 nfsm_chain_get_32(error, &nmrep, retlen);
8334 if (!error && anp) {
8335 /*
8336 * There can be one problem with doing the prefetch.
8337 * Because we don't have the node before we start the RPC, we
8338 * can't have the buffer busy while the READ is performed.
8339 * So there is a chance that other I/O occurred on the same
8340 * range of data while we were performing this RPC. If that
8341 * happens, then it's possible the data we have in the READ
8342 * response is no longer up to date.
8343 * Once we have the node and the buffer, we need to make sure
8344 * that there's no chance we could be putting stale data in
8345 * the buffer.
8346 * So, we check if the range read is dirty or if any I/O may
8347 * have occurred on it while we were performing our RPC.
8348 */
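			/*
			 * Concretely (sketch): the read landed in pages 0..lastpg of
			 * buffer 0, so the data is kept only if (nb_dirty & pagemask)
			 * is clear, there is no dirty region (nb_dirtyoff == 0), and
			 * no I/O has finished on the node since 'now' was sampled
			 * before the RPC (n_lastio < now).
			 */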
8349 struct nfsbuf *bp = NULL;
8350 int lastpg;
8351 nfsbufpgs pagemask, pagemaskand;
8352
8353 retlen = MIN(retlen, rlen);
8354
8355 /* check if node needs size update or invalidation */
8356 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8357 nfs_data_update_size(anp, 0);
8358 }
8359 if (!(error = nfs_node_lock(anp))) {
8360 if (anp->n_flag & NNEEDINVALIDATE) {
8361 anp->n_flag &= ~NNEEDINVALIDATE;
8362 nfs_node_unlock(anp);
8363 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8364 if (!error) { /* let's play it safe and just drop the data */
8365 error = EIO;
8366 }
8367 } else {
8368 nfs_node_unlock(anp);
8369 }
8370 }
8371
8372 /* calculate page mask for the range of data read */
8373 lastpg = (retlen - 1) / PAGE_SIZE;
8374 nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1);
8375
8376 if (!error) {
8377 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8378 }
8379 /* don't save the data if dirty or potential I/O conflict (bp may be NULL from NBLK_NOWAIT) */
8380 if (bp) { nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand); }
8381 if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) &&
8382 timevalcmp(&anp->n_lastio, &now, <)) {
8383 OSAddAtomic64(1, &nfsstats.read_bios);
8384 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8385 SET(bp->nb_flags, NB_READ);
8386 NFS_BUF_MAP(bp);
8387 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8388 if (error) {
8389 bp->nb_error = error;
8390 SET(bp->nb_flags, NB_ERROR);
8391 } else {
8392 bp->nb_offio = 0;
8393 bp->nb_endio = rlen;
8394 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8395 bp->nb_endio = retlen;
8396 }
8397 if (eof || (retlen == 0)) {
8398 /* zero out the remaining data (up to EOF) */
8399 off_t rpcrem, eofrem, rem;
8400 rpcrem = (rlen - retlen);
8401 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8402 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8403 if (rem > 0) {
8404 bzero(bp->nb_data + retlen, rem);
8405 }
8406 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8407 /* ugh... short read ... just invalidate for now... */
8408 SET(bp->nb_flags, NB_INVAL);
8409 }
8410 }
8411 nfs_buf_read_finish(bp);
8412 microuptime(&anp->n_lastio);
8413 }
8414 if (bp) {
8415 nfs_buf_release(bp, 1);
8416 }
8417 }
8418 error = 0; /* ignore any transient error in processing the prefetch */
8419 }
8420 if (adnp && !adbusyerror) {
8421 nfs_node_clear_busy(adnp);
8422 adbusyerror = ENOENT;
8423 }
8424 if (!busyerror) {
8425 nfs_node_clear_busy(np);
8426 busyerror = ENOENT;
8427 }
8428 if (adnp) {
8429 vnode_put(NFSTOV(adnp));
8430 }
8431 if (inuse) {
8432 nfs_mount_state_in_use_end(nmp, error);
8433 }
8434 if (error && *anpp) {
8435 vnode_put(NFSTOV(*anpp));
8436 *anpp = NULL;
8437 }
8438 nfsm_chain_cleanup(&nmreq);
8439 nfsm_chain_cleanup(&nmrep);
8440out_free:
8441 NFS_ZFREE(nfs_fhandle_zone, fh);
8442 NFS_ZFREE(nfs_req_zone, req);
8443 FREE(nvattr, M_TEMP);
8444 return error;
8445}
8446
8447/*
8448 * Remove a named attribute.
8449 */
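/*
 * Used by nfs4_vnop_removexattr() (anp == NULL, so the attribute is looked
 * up here first) and by nfs4_vnop_setxattr() when an all-zero FinderInfo is
 * written, e.g. (sketch):
 *
 *	error = nfs4_named_attr_remove(VTONFS(vp), NULL, ap->a_name, ctx);
 */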
8450int
8451nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8452{
8453 nfsnode_t adnp = NULL;
8454 struct nfsmount *nmp;
8455 struct componentname cn;
8456 struct vnop_remove_args vra;
8457 int error, putanp = 0;
8458
8459 nmp = NFSTONMP(np);
8460 if (nfs_mount_gone(nmp)) {
8461 return ENXIO;
8462 }
8463
8464 bzero(&cn, sizeof(cn));
8465 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8466 cn.cn_namelen = NFS_STRLEN_INT(name);
8467 cn.cn_nameiop = DELETE;
8468 cn.cn_flags = 0;
8469
8470 if (!anp) {
8471 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8472 0, ctx, &anp, NULL);
8473 if ((!error && !anp) || (error == ENOATTR)) {
8474 error = ENOENT;
8475 }
8476 if (error) {
8477 if (anp) {
8478 vnode_put(NFSTOV(anp));
8479 anp = NULL;
8480 }
8481 goto out;
8482 }
8483 putanp = 1;
8484 }
8485
8486 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8487 goto out;
8488 }
8489 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8490 nfs_node_clear_busy(np);
8491 if (!adnp) {
8492 error = ENOENT;
8493 goto out;
8494 }
8495
8496 vra.a_desc = &vnop_remove_desc;
8497 vra.a_dvp = NFSTOV(adnp);
8498 vra.a_vp = NFSTOV(anp);
8499 vra.a_cnp = &cn;
8500 vra.a_flags = 0;
8501 vra.a_context = ctx;
8502 error = nfs_vnop_remove(&vra);
8503out:
8504 if (adnp) {
8505 vnode_put(NFSTOV(adnp));
8506 }
8507 if (putanp) {
8508 vnode_put(NFSTOV(anp));
8509 }
8510 return error;
8511}
8512
8513int
8514nfs4_vnop_getxattr(
8515 struct vnop_getxattr_args /* {
8516 * struct vnodeop_desc *a_desc;
8517 * vnode_t a_vp;
8518 * const char * a_name;
8519 * uio_t a_uio;
8520 * size_t *a_size;
8521 * int a_options;
8522 * vfs_context_t a_context;
8523 * } */*ap)
8524{
8525 vfs_context_t ctx = ap->a_context;
8526 struct nfsmount *nmp;
8527 struct nfs_vattr *nvattr;
8528 struct componentname cn;
8529 nfsnode_t anp;
8530 int error = 0, isrsrcfork;
8531
8532 nmp = VTONMP(ap->a_vp);
8533 if (nfs_mount_gone(nmp)) {
8534 return ENXIO;
8535 }
8536
8537 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8538 return ENOTSUP;
8539 }
8540
8541 MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
8542 error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
8543 if (error) {
8544 goto out;
8545 }
8546 if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8547 !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8548 error = ENOATTR;
8549 goto out;
8550 }
8551
8552 bzero(&cn, sizeof(cn));
8553 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8554 cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
8555 cn.cn_nameiop = LOOKUP;
8556 cn.cn_flags = MAKEENTRY;
8557
8558 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8559 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8560
8561 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8562 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8563 if ((!error && !anp) || (error == ENOENT)) {
8564 error = ENOATTR;
8565 }
8566 if (!error) {
8567 if (ap->a_uio) {
8568 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8569 } else {
8570 *ap->a_size = anp->n_size;
8571 }
8572 }
8573 if (anp) {
8574 vnode_put(NFSTOV(anp));
8575 }
8576out:
8577 FREE(nvattr, M_TEMP);
8578 return error;
8579}
8580
8581int
8582nfs4_vnop_setxattr(
8583 struct vnop_setxattr_args /* {
8584 * struct vnodeop_desc *a_desc;
8585 * vnode_t a_vp;
8586 * const char * a_name;
8587 * uio_t a_uio;
8588 * int a_options;
8589 * vfs_context_t a_context;
8590 * } */*ap)
8591{
8592 vfs_context_t ctx = ap->a_context;
8593 int options = ap->a_options;
8594 uio_t uio = ap->a_uio;
8595 const char *name = ap->a_name;
8596 struct nfsmount *nmp;
8597 struct componentname cn;
8598 nfsnode_t anp = NULL;
8599 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8600#define FINDERINFOSIZE 32
8601 uint8_t finfo[FINDERINFOSIZE];
8602 uint32_t *finfop;
8603 struct nfs_open_file *nofp = NULL;
8604 char uio_buf[UIO_SIZEOF(1)];
8605 uio_t auio;
8606 struct vnop_write_args vwa;
8607
8608 nmp = VTONMP(ap->a_vp);
8609 if (nfs_mount_gone(nmp)) {
8610 return ENXIO;
8611 }
8612
8613 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8614 return ENOTSUP;
8615 }
8616
8617 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8618 return EINVAL;
8619 }
8620
8621 /* XXX limitation based on need to back up uio on short write */
8622 if (uio_iovcnt(uio) > 1) {
8623 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
8624 return EINVAL;
8625 }
8626
8627 bzero(&cn, sizeof(cn));
8628 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8629 cn.cn_namelen = NFS_STRLEN_INT(name);
8630 cn.cn_nameiop = CREATE;
8631 cn.cn_flags = MAKEENTRY;
8632
8633 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
8634 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8635 if (!isrsrcfork) {
8636 uio_setoffset(uio, 0);
8637 }
8638 if (isfinderinfo) {
8639 if (uio_resid(uio) != sizeof(finfo)) {
8640 return ERANGE;
8641 }
8642 error = uiomove((char*)&finfo, sizeof(finfo), uio);
8643 if (error) {
8644 return error;
8645 }
8646 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8647 empty = 1;
8648 for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
8649 if (finfop[i]) {
8650 empty = 0;
8651 break;
8652 }
8653 }
8654 if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
8655 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
8656 if (error == ENOENT) {
8657 error = 0;
8658 }
8659 return error;
8660 }
8661 /* first, let's see if we get a create/replace error */
8662 }
8663
8664 /*
8665 * create/open the xattr
8666 *
8667 * We need to make sure not to create it if XATTR_REPLACE.
8668 * For all xattrs except the resource fork, we also want to
8669 * truncate the xattr to remove any current data. We'll do
8670 * that by setting the size to 0 on create/open.
8671 */
8672 flags = 0;
8673 if (!(options & XATTR_REPLACE)) {
8674 flags |= NFS_GET_NAMED_ATTR_CREATE;
8675 }
8676 if (options & XATTR_CREATE) {
8677 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
8678 }
8679 if (!isrsrcfork) {
8680 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
8681 }

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	if (!error) {
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}
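
/*
 * Illustrative sketch (not part of this file): the vnop above is what
 * ultimately services setxattr(2) on an NFSv4 mount with named attribute
 * support.  "/Volumes/nfs/file" is a made-up example path.  Note the
 * FinderInfo special case handled above: writing an all-zero FinderInfo
 * blob (sizeof(finfo) bytes) removes the attribute instead of storing it.
 *
 *	#include <sys/xattr.h>
 *
 *	const char data[] = "some value";
 *	// create-or-replace (default): maps to NFS_GET_NAMED_ATTR_CREATE
 *	setxattr("/Volumes/nfs/file", "user.comment", data, sizeof(data) - 1, 0, 0);
 *	// fail if the attribute already exists: adds NFS_GET_NAMED_ATTR_CREATE_GUARDED
 *	setxattr("/Volumes/nfs/file", "user.comment", data, sizeof(data) - 1, 0, XATTR_CREATE);
 */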

int
nfs4_vnop_removexattr(
	struct vnop_removexattr_args /* {
	* struct vnodeop_desc *a_desc;
	* vnode_t a_vp;
	* const char * a_name;
	* int a_options;
	* vfs_context_t a_context;
	* } */*ap)
{
	struct nfsmount *nmp = VTONMP(ap->a_vp);
	int error;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}
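
/*
 * Illustrative note (not part of this file): removexattr(2) on an NFSv4
 * mount lands here; the server's ENOENT is mapped to ENOATTR, which is what
 * the xattr API reports for a missing attribute, e.g.:
 *
 *	#include <errno.h>
 *	#include <sys/xattr.h>
 *
 *	// "/Volumes/nfs/file" is a made-up path
 *	if (removexattr("/Volumes/nfs/file", "user.comment", 0) == -1 && errno == ENOATTR)
 *		; // the attribute wasn't there
 */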

int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
	* struct vnodeop_desc *a_desc;
	* vnode_t a_vp;
	* uio_t a_uio;
	* size_t *a_size;
	* int a_options;
	* vfs_context_t a_context;
	* } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr *nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	error = nfs_getattr(np, nvattr, ctx, NGA_CACHED);
	if (error) {
		goto out_free;
	}
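	/*
	 * If the server tells us (via the named_attr attribute) that this
	 * object has no named attributes, return an empty list right away
	 * without even looking up the attribute directory.
	 */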
	if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		error = 0;
		goto out_free;
	}

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		goto out_free;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_UNCACHED))) {
		goto out;
	}

	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

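	/*
	 * Walk the attribute directory one NFS_DIRBLKSIZ buffer at a time:
	 * each buffer is addressed by the cookie of its first entry (lbn),
	 * filled via nfs_buf_readdir() if not already cached, and scanned
	 * entry by entry, remembering the last seekoff as the next cookie.
	 * A cookie that fails to advance would loop forever, so that case
	 * is treated as EIO below.
	 */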
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			if (error == NFSERR_DIRBUFDROPPED) {
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		if (!error && !done && (nextcookie == cookie)) {
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
out_free:
	FREE(nvattr, M_TEMP);
	return error;
}
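
/*
 * Illustrative sketch (not part of this file): the uio == NULL branch above
 * implements the usual two-pass listxattr(2) convention, where the first call
 * only computes the required size.  "/Volumes/nfs/file" is a made-up path:
 *
 *	#include <stdlib.h>
 *	#include <sys/xattr.h>
 *
 *	ssize_t len = listxattr("/Volumes/nfs/file", NULL, 0, 0);  // size only
 *	if (len > 0) {
 *		char *names = malloc(len);  // receives a NUL-separated name list
 *		len = listxattr("/Volumes/nfs/file", names, len, 0);
 *		free(names);
 *	}
 */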

#if NAMEDSTREAMS
int
nfs4_vnop_getnamedstream(
	struct vnop_getnamedstream_args /* {
	* struct vnodeop_desc *a_desc;
	* vnode_t a_vp;
	* vnode_t *a_svpp;
	* const char *a_name;
	* enum nsoperation a_operation;
	* int a_flags;
	* vfs_context_t a_context;
	* } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_vattr *nvattr;
	struct componentname cn;
	nfsnode_t anp;
	int error = 0;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED);
	if (error) {
		goto out;
	}
	if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		error = ENOATTR;
		goto out;
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
	    0, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
		error = ENOATTR;
	}
	if (!error && anp) {
		*ap->a_svpp = NFSTOV(anp);
	} else if (anp) {
		vnode_put(NFSTOV(anp));
	}
out:
	FREE(nvattr, M_TEMP);
	return error;
}
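
/*
 * Illustrative sketch (not part of this file): getnamedstream is typically
 * reached when a file's resource fork is opened through its special path
 * suffix (_PATH_RSRCFORKSPEC from <sys/paths.h>), for example:
 *
 *	#include <fcntl.h>
 *	#include <sys/paths.h>
 *
 *	// "/Volumes/nfs/file" is a made-up path; _PATH_RSRCFORKSPEC is "/..namedfork/rsrc"
 *	int fd = open("/Volumes/nfs/file" _PATH_RSRCFORKSPEC, O_RDONLY);
 */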

int
nfs4_vnop_makenamedstream(
	struct vnop_makenamedstream_args /* {
	* struct vnodeop_desc *a_desc;
	* vnode_t *a_svpp;
	* vnode_t a_vp;
	* const char *a_name;
	* int a_flags;
	* vfs_context_t a_context;
	* } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp;
	int error = 0;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
	cn.cn_namelen = NFS_STRLEN_INT(ap->a_name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
	if ((!error && !anp) || (error == ENOENT)) {
		error = ENOATTR;
	}
	if (!error && anp) {
		*ap->a_svpp = NFSTOV(anp);
	} else if (anp) {
		vnode_put(NFSTOV(anp));
	}
	return error;
}
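
/*
 * Note: this differs from nfs4_vnop_getnamedstream() above only in the
 * nfs4_named_attr_get() arguments -- a CREATE nameiop plus the
 * NFS_GET_NAMED_ATTR_CREATE flag and read/write (ACCESS_BOTH) open access --
 * i.e. creating a named stream is just the create-capable variant of the lookup.
 */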

int
nfs4_vnop_removenamedstream(
	struct vnop_removenamedstream_args /* {
	* struct vnodeop_desc *a_desc;
	* vnode_t a_vp;
	* vnode_t a_svp;
	* const char *a_name;
	* int a_flags;
	* vfs_context_t a_context;
	* } */*ap)
{
	struct nfsmount *nmp = VTONMP(ap->a_vp);
	nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
	nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/*
	 * Given that a_svp is a named stream, checking for
	 * named attribute support is kinda pointless.
	 */
	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
}

#endif /* NAMEDSTREAMS */
#endif /* CONFIG_NFS4 */

#endif /* CONFIG_NFS_CLIENT */