/* apple/xnu (xnu-6153.81.5): bsd/nfs/nfs4_vnops.c */
/*
 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

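/*
 * Note on the pattern used throughout this file (editorial summary, not
 * part of the original source): each NFSv4 RPC below is built as a single
 * COMPOUND request, typically PUTFH (set the current filehandle), the
 * operation itself (ACCESS, READ, WRITE, ...), and a trailing GETATTR so
 * the node's cached attributes can be refreshed from the same reply.
 * The nfsm_chain_* macros thread the local 'error' variable through every
 * encode/decode step: once 'error' becomes nonzero, subsequent macros are
 * no-ops, and nfsmout_if(error) jumps to the 'nfsmout' cleanup label.
 */
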
#if CONFIG_NFS4
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
		} else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		}
		if (dvp != NULLVP) {
			vnode_put(dvp);
		}
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
		access_result |= NFS_ACCESS_DELETE;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
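
/*
 * Illustrative sketch (not part of the original source): the per-node
 * access cache above is a small array of (uid, rights, timestamp) slots.
 * A hypothetical caller-side view might look like this, where the
 * NFS_ACCESS_* bits are the mask sent in *access and the same variable
 * carries back the rights the server granted:
 */
#if 0 /* example only; np and ctx are hypothetical */
	uint32_t wanted = NFS_ACCESS_READ | NFS_ACCESS_LOOKUP;
	int error = nfs4_access_rpc(np, &wanted, 0, ctx);
	if (!error && (wanted & NFS_ACCESS_READ)) {
		/* read access granted; the result is now cached in np->n_access[] */
	}
#endif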

int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
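
/*
 * Illustrative sketch (not part of the original source): the NGA_* flags
 * select RPC behavior, so a soft, ACL-requesting fetch might be issued as
 * below. The np/ctx locals are hypothetical.
 */
#if 0 /* example only */
	struct nfs_vattr nvattr;
	u_int64_t xid = 0;
	int error;
	NVATTR_INIT(&nvattr);
	error = nfs4_getattr_rpc(np, NULL, np->n_fhp, np->n_fhsize,
	    NGA_SOFT | NGA_ACL, ctx, &nvattr, &xid);
	NVATTR_CLEANUP(&nvattr);
#endif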

int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
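
/*
 * Illustrative sketch (not part of the original source): the async RPC
 * helpers above come in start/finish pairs. A minimal synchronous-style
 * caller might chain them as below; np, thd, cred, and uio are hypothetical.
 */
#if 0 /* example only */
	struct nfsreq *req = NULL;
	size_t len = 4096;
	int eof = 0;
	int error = nfs4_read_rpc_async(np, 0, len, thd, cred, NULL, &req);
	if (!error) {
		error = nfs4_read_rpc_async_finish(np, req, uio, &len, &eof);
	}
#endif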

int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}
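
/*
 * Illustrative sketch (not part of the original source): the write verifier
 * returned above is how a client detects a server reboot. After UNSTABLE
 * writes, the caller saves the verifier and later COMMITs the range; if
 * nfs4_commit_rpc() (below) reports NFSERR_STALEWRITEVERF, the server may
 * have lost the uncommitted data and the dirty buffers must be rewritten.
 */
#if 0 /* example only; np, offset, count, cred, and saved_wverf are hypothetical */
	int error = nfs4_commit_rpc(np, offset, count, cred, saved_wverf);
	if (error == NFSERR_STALEWRITEVERF) {
		/* verifier changed: resend the unstable writes, then commit again */
	}
#endif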

int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}

int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp))) {
		return lockerror;
	}

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
			dp = NFS_DIRENTRY_NEXT(dp);
		}
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh.fh_len = 0;
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen + 1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
		}

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen + 1);
		if (dnp->n_parent) {
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		} else {
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		}
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
		}

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus) {
			microuptime(&now);
		}

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name) - 1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen) {
				nfsm_chain_adv(error, &nmrep,
				    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			}
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			}
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
				dp->d_fileno = nvattrp->nva_fileid;
			}
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen + 1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0) {
				bzero(padstart, padlen);
			}
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp) {
		nfs_buf_release(bp, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
}
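
/*
 * Illustrative layout note (not part of the original source): in rdirplus
 * mode each record packed above carries extra bytes after the entry name,
 * which is why xlen = fhlen + sizeof(time_t) in the code:
 *
 *   d_name[0 .. d_namlen-1]    entry name
 *   d_name[d_namlen]           NUL terminator
 *   d_name[d_namlen + 1]       file handle length byte (0 = invalid fh)
 *   d_name[d_namlen + 2 ...]   file handle data (fh.fh_len bytes)
 *   next fhlen bytes end here, followed by a time_t attribute timestamp,
 *   then padding out to the NFS_DIRENTRY_LEN record boundary.
 */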

int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}


int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}

int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;

	if (count > UINT32_MAX) {
		count32 = 0;
	} else {
		count32 = count;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  struct vnode_attr *a_vap;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error) {
		return error;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, 0777);
		} else {
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva.nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva.nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return error;
}
1597
1598int
1599nfs4_setattr_rpc(
1600 nfsnode_t np,
1601 struct vnode_attr *vap,
1602 vfs_context_t ctx)
1603{
1604 struct nfsmount *nmp = NFSTONMP(np);
1605 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
1606 u_int64_t xid, nextxid;
1607 struct nfsm_chain nmreq, nmrep;
1608 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
1609 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1610 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
1611 nfs_stateid stateid;
1612 struct nfsreq_secinfo_args si;
1613
1614 if (nfs_mount_gone(nmp)) {
1615 return ENXIO;
1616 }
1617 nfsvers = nmp->nm_vers;
1618 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
1619 return EINVAL;
1620 }
1621
1622 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
1623 /* we don't support setting unsupported flags (duh!) */
1624 if (vap->va_active & ~VNODE_ATTR_va_flags) {
1625 return EINVAL; /* return EINVAL if other attributes also set */
1626 } else {
1627 return ENOTSUP; /* return ENOTSUP for chflags(2) */
1628 }
1629 }
1630
1631 /* don't bother requesting some changes if they don't look like they are changing */
1632 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
1633 VATTR_CLEAR_ACTIVE(vap, va_uid);
1634 }
1635 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
1636 VATTR_CLEAR_ACTIVE(vap, va_gid);
1637 }
1638 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
1639 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1640 }
1641 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
1642 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1643 }
1644
1645tryagain:
1646 /* do nothing if no attributes will be sent */
1647 nfs_vattr_set_bitmap(nmp, bitmap, vap);
1648 if (!bitmap[0] && !bitmap[1]) {
1649 return 0;
1650 }
1651
1652 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1653 nfsm_chain_null(&nmreq);
1654 nfsm_chain_null(&nmrep);
1655
1656 /*
1657 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1658 * need to invalidate any cached ACL. And if we had an ACL cached,
1659 * we might as well also fetch the new value.
1660 */
1661 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1662 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1663 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
1664 if (NACLVALID(np)) {
1665 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
1666 }
1667 NACLINVALIDATE(np);
1668 }
1669
1670 // PUTFH, SETATTR, GETATTR
1671 numops = 3;
1672 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
1673 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
1674 numops--;
1675 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1676 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1677 numops--;
1678 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
1679 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1680 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1681 } else {
1682 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1683 }
1684 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1685 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1686 numops--;
1687 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1688 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1689 nfsm_chain_build_done(error, &nmreq);
1690 nfsm_assert(error, (numops == 0), EPROTO);
1691 nfsmout_if(error);
1692 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1693
1694 if ((lockerror = nfs_node_lock(np))) {
1695 error = lockerror;
1696 }
1697 nfsm_chain_skip_tag(error, &nmrep);
1698 nfsm_chain_get_32(error, &nmrep, numops);
1699 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1700 nfsmout_if(error);
1701 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1702 nfsmout_if(error == EBADRPC);
1703 setattr_error = error;
1704 error = 0;
1705 bmlen = NFS_ATTR_BITMAP_LEN;
1706 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1707 if (!error) {
1708 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
1709 microuptime(&np->n_lastio);
1710 }
1711 nfs_vattr_set_supported(setbitmap, vap);
1712 error = setattr_error;
1713 }
1714 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1715 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1716 if (error) {
1717 NATTRINVALIDATE(np);
1718 }
1719 /*
1720 * We just changed the attributes and we want to make sure that we
1721 * see the latest attributes. Get the next XID. If it's not the
1722 * next XID after the SETATTR XID, then it's possible that another
1723 * RPC was in flight at the same time and it might put stale attributes
1724 * in the cache. In that case, we invalidate the attributes and set
1725 * the attribute cache XID to guarantee that newer attributes will
1726 * get loaded next.
1727 */
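/*
 * A sketch of the race: if our SETATTR used XID 100, nfs_get_xid()
 * below should return 101. If it returns, say, 102, another RPC
 * (XID 101) was in flight, and we can't tell whether its reply
 * carried pre- or post-SETATTR attributes -- so we invalidate.
 */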
1728 nextxid = 0;
1729 nfs_get_xid(&nextxid);
1730 if (nextxid != (xid + 1)) {
1731 np->n_xid = nextxid;
1732 NATTRINVALIDATE(np);
1733 }
1734nfsmout:
1735 if (!lockerror) {
1736 nfs_node_unlock(np);
1737 }
1738 nfsm_chain_cleanup(&nmreq);
1739 nfsm_chain_cleanup(&nmrep);
1740 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1741 /*
1742 * Some servers may not like ACL/mode combos that get sent.
1743 * If it looks like that's what the server choked on, try setting
1744 * just the ACL and not the mode (unless it looks like everything
1745 * but mode was already successfully set).
1746 */
1747 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1748 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
1749 VATTR_CLEAR_ACTIVE(vap, va_mode);
1750 error = 0;
1751 goto tryagain;
1752 }
1753 }
1754 return error;
1755}
1756#endif /* CONFIG_NFS4 */
1757
1758/*
1759 * Wait for any pending recovery to complete.
1760 */
1761int
1762nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1763{
1764 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1765 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1766
1767 lck_mtx_lock(&nmp->nm_lock);
1768 while (nmp->nm_state & NFSSTA_RECOVER) {
1769 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1770 break;
1771 }
1772 nfs_mount_sock_thread_wake(nmp);
1773 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1774 slpflag = 0;
1775 }
1776 lck_mtx_unlock(&nmp->nm_lock);
1777
1778 return error;
1779}
1780
1781/*
1782 * We're about to use/manipulate NFS mount's open/lock state.
1783 * Wait for any pending state recovery to complete, then
1784 * mark the state as being in use (which will hold off
1785 * the recovery thread until we're done).
1786 */
1787int
1788nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1789{
1790 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1791 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1792
1793 if (nfs_mount_gone(nmp)) {
1794 return ENXIO;
1795 }
1796 lck_mtx_lock(&nmp->nm_lock);
1797 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
1798 lck_mtx_unlock(&nmp->nm_lock);
1799 return ENXIO;
1800 }
1801 while (nmp->nm_state & NFSSTA_RECOVER) {
1802 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1803 break;
1804 }
1805 nfs_mount_sock_thread_wake(nmp);
1806 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1807 slpflag = 0;
1808 }
1809 if (!error) {
1810 nmp->nm_stateinuse++;
1811 }
1812 lck_mtx_unlock(&nmp->nm_lock);
1813
1814 return error;
1815}
1816
1817/*
1818 * We're done using/manipulating the NFS mount's open/lock
1819 * state. If the given error indicates that recovery should
1820 * be performed, we'll initiate recovery.
1821 */
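/*
 * A sketch of the typical calling pattern (cf. nfs_vnop_mmap below):
 *
 *	restart:
 *		if ((error = nfs_mount_state_in_use_start(nmp, thd)))
 *			return error;
 *		... use/update open or lock state ...
 *		if (nfs_mount_state_in_use_end(nmp, error))
 *			goto restart;
 */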
1822int
1823nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1824{
1825 int restart = nfs_mount_state_error_should_restart(error);
1826
1827 if (nfs_mount_gone(nmp)) {
1828 return restart;
1829 }
1830 lck_mtx_lock(&nmp->nm_lock);
1831 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1832 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1833 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1834 nfs_need_recover(nmp, error);
1835 }
1836 if (nmp->nm_stateinuse > 0) {
1837 nmp->nm_stateinuse--;
1838 } else {
1839 panic("NFS mount state in use count underrun");
1840 }
1841 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
1842 wakeup(&nmp->nm_stateinuse);
1843 }
1844 lck_mtx_unlock(&nmp->nm_lock);
1845 if (error == NFSERR_GRACE) {
1846 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1847 }
1848
1849 return restart;
1850}
1851
1852/*
1853 * Does the error mean we should restart/redo a state-related operation?
1854 */
1855int
1856nfs_mount_state_error_should_restart(int error)
1857{
1858 switch (error) {
1859 case NFSERR_STALE_STATEID:
1860 case NFSERR_STALE_CLIENTID:
1861 case NFSERR_ADMIN_REVOKED:
1862 case NFSERR_EXPIRED:
1863 case NFSERR_OLD_STATEID:
1864 case NFSERR_BAD_STATEID:
1865 case NFSERR_GRACE:
1866 return 1;
1867 }
1868 return 0;
1869}
1870
1871/*
1872 * In some cases we may want to limit how many times we restart a
1873 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1874 * Base the limit on the lease (as long as it's not too short).
1875 */
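/*
 * (E.g. a 120-second lease permits up to 120 restarts, while a
 * 10-second lease still gets the 60-restart floor.)
 */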
1876uint
1877nfs_mount_state_max_restarts(struct nfsmount *nmp)
1878{
1879 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1880}
1881
1882/*
1883 * Does the error mean we probably lost a delegation?
1884 */
1885int
1886nfs_mount_state_error_delegation_lost(int error)
1887{
1888 switch (error) {
1889 case NFSERR_STALE_STATEID:
1890 case NFSERR_ADMIN_REVOKED:
1891 case NFSERR_EXPIRED:
1892 case NFSERR_OLD_STATEID:
1893 case NFSERR_BAD_STATEID:
1894 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1895 return 1;
1896 }
1897 return 0;
1898}
1899
1900
1901/*
1902 * Mark an NFS node's open state as busy.
1903 */
1904int
1905nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1906{
1907 struct nfsmount *nmp;
1908 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1909 int error = 0, slpflag;
1910
1911 nmp = NFSTONMP(np);
1912 if (nfs_mount_gone(nmp)) {
1913 return ENXIO;
1914 }
1915 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1916
1917 lck_mtx_lock(&np->n_openlock);
1918 while (np->n_openflags & N_OPENBUSY) {
1919 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1920 break;
1921 }
1922 np->n_openflags |= N_OPENWANT;
1923 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1924 slpflag = 0;
1925 }
1926 if (!error) {
1927 np->n_openflags |= N_OPENBUSY;
1928 }
1929 lck_mtx_unlock(&np->n_openlock);
1930
1931 return error;
1932}
1933
1934/*
1935 * Clear an NFS node's open state busy flag and wake up
1936 * anyone wanting it.
1937 */
1938void
1939nfs_open_state_clear_busy(nfsnode_t np)
1940{
1941 int wanted;
1942
1943 lck_mtx_lock(&np->n_openlock);
1944 if (!(np->n_openflags & N_OPENBUSY)) {
1945 panic("nfs_open_state_clear_busy");
1946 }
1947 wanted = (np->n_openflags & N_OPENWANT);
1948 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1949 lck_mtx_unlock(&np->n_openlock);
1950 if (wanted) {
1951 wakeup(&np->n_openflags);
1952 }
1953}
1954
1955/*
1956 * Search a mount's open owner list for the owner for this credential.
1957 * If not found and "alloc" is set, then allocate a new one.
1958 */
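/*
 * Note: the mount lock is dropped around the allocation below, so
 * another thread may link an owner for the same uid while we sleep;
 * the "tryagain" rescan catches that case and the losing allocation
 * is destroyed before returning.
 */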
1959struct nfs_open_owner *
1960nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1961{
1962 uid_t uid = kauth_cred_getuid(cred);
1963 struct nfs_open_owner *noop, *newnoop = NULL;
1964
1965tryagain:
1966 lck_mtx_lock(&nmp->nm_lock);
1967 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1968 if (kauth_cred_getuid(noop->noo_cred) == uid) {
1969 break;
1970 }
1971 }
1972
1973 if (!noop && !newnoop && alloc) {
1974 lck_mtx_unlock(&nmp->nm_lock);
1975 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1976 if (!newnoop) {
1977 return NULL;
1978 }
1979 bzero(newnoop, sizeof(*newnoop));
1980 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1981 newnoop->noo_mount = nmp;
1982 kauth_cred_ref(cred);
1983 newnoop->noo_cred = cred;
1984 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1985 TAILQ_INIT(&newnoop->noo_opens);
1986 goto tryagain;
1987 }
1988 if (!noop && newnoop) {
1989 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1990 os_ref_init(&newnoop->noo_refcnt, NULL);
1991 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1992 noop = newnoop;
1993 }
1994 lck_mtx_unlock(&nmp->nm_lock);
1995
1996 if (newnoop && (noop != newnoop)) {
1997 nfs_open_owner_destroy(newnoop);
1998 }
1999
2000 if (noop) {
2001 nfs_open_owner_ref(noop);
2002 }
2003
2004 return noop;
2005}
2006
2007/*
2008 * destroy an open owner that's no longer needed
2009 */
2010void
2011nfs_open_owner_destroy(struct nfs_open_owner *noop)
2012{
2013 if (noop->noo_cred) {
2014 kauth_cred_unref(&noop->noo_cred);
2015 }
2016 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2017 FREE(noop, M_TEMP);
2018}
2019
2020/*
2021 * acquire a reference count on an open owner
2022 */
2023void
2024nfs_open_owner_ref(struct nfs_open_owner *noop)
2025{
2026 lck_mtx_lock(&noop->noo_lock);
2027 os_ref_retain_locked(&noop->noo_refcnt);
2028 lck_mtx_unlock(&noop->noo_lock);
2029}
2030
2031/*
2032 * drop a reference count on an open owner and destroy it if
2033 * it is no longer referenced and no longer on the mount's list.
2034 */
2035void
2036nfs_open_owner_rele(struct nfs_open_owner *noop)
2037{
2038 os_ref_count_t newcount;
2039
2040 lck_mtx_lock(&noop->noo_lock);
2041 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
2042 panic("nfs_open_owner_rele: no refcnt");
2043 }
2044 newcount = os_ref_release_locked(&noop->noo_refcnt);
2045 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2046 panic("nfs_open_owner_rele: busy");
2047 }
2048 /* XXX we may potentially want to clean up idle/unused open owner structures */
2049 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
2050 lck_mtx_unlock(&noop->noo_lock);
2051 return;
2052 }
2053 /* owner is no longer referenced or linked to mount, so destroy it */
2054 lck_mtx_unlock(&noop->noo_lock);
2055 nfs_open_owner_destroy(noop);
2056}
2057
2058/*
2059 * Mark an open owner as busy because we are about to
2060 * start an operation that uses and updates open owner state.
2061 */
2062int
2063nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2064{
2065 struct nfsmount *nmp;
2066 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2067 int error = 0, slpflag;
2068
2069 nmp = noop->noo_mount;
2070 if (nfs_mount_gone(nmp)) {
2071 return ENXIO;
2072 }
2073 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2074
2075 lck_mtx_lock(&noop->noo_lock);
2076 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
2077 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2078 break;
2079 }
2080 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2081 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
2082 slpflag = 0;
2083 }
2084 if (!error) {
2085 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
2086 }
2087 lck_mtx_unlock(&noop->noo_lock);
2088
2089 return error;
2090}
2091
2092/*
2093 * Clear the busy flag on an open owner and wake up anyone waiting
2094 * to mark it busy.
2095 */
2096void
2097nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2098{
2099 int wanted;
2100
2101 lck_mtx_lock(&noop->noo_lock);
2102 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2103 panic("nfs_open_owner_clear_busy");
2104 }
2105 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2106 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2107 lck_mtx_unlock(&noop->noo_lock);
2108 if (wanted) {
2109 wakeup(noop);
2110 }
2111}
2112
2113/*
2114 * Given an open/lock owner and an error code, increment the
2115 * sequence ID if appropriate.
2116 */
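/*
 * (Per RFC 3530, open/lock sequence IDs advance even on most errors;
 * the errors exempted below are ones for which the server will not
 * have recorded a new seqid, e.g. NFS4ERR_BAD_SEQID.)
 */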
2117void
2118nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2119{
2120 switch (error) {
2121 case NFSERR_STALE_CLIENTID:
2122 case NFSERR_STALE_STATEID:
2123 case NFSERR_OLD_STATEID:
2124 case NFSERR_BAD_STATEID:
2125 case NFSERR_BAD_SEQID:
2126 case NFSERR_BADXDR:
2127 case NFSERR_RESOURCE:
2128 case NFSERR_NOFILEHANDLE:
2129 /* do not increment the open seqid on these errors */
2130 return;
2131 }
2132 if (noop) {
2133 noop->noo_seqid++;
2134 }
2135 if (nlop) {
2136 nlop->nlo_seqid++;
2137 }
2138}
2139
2140/*
2141 * Search a node's open file list for any conflicts with this request.
2142 * Also find this open owner's open file structure.
2143 * If not found and "alloc" is set, then allocate one.
2144 */
2145int
2146nfs_open_file_find(
2147 nfsnode_t np,
2148 struct nfs_open_owner *noop,
2149 struct nfs_open_file **nofpp,
2150 uint32_t accessMode,
2151 uint32_t denyMode,
2152 int alloc)
2153{
2154 *nofpp = NULL;
2155 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2156}
2157
2158/*
2159 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2160 * if an existing one is not found. This is used in "create" scenarios to
2161 * officially add the provisional nofp to the node once the node is created.
2162 */
2163int
2164nfs_open_file_find_internal(
2165 nfsnode_t np,
2166 struct nfs_open_owner *noop,
2167 struct nfs_open_file **nofpp,
2168 uint32_t accessMode,
2169 uint32_t denyMode,
2170 int alloc)
2171{
2172 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2173
2174 if (!np) {
2175 goto alloc;
2176 }
2177tryagain:
2178 lck_mtx_lock(&np->n_openlock);
2179 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2180 if (nofp2->nof_owner == noop) {
2181 nofp = nofp2;
2182 if (!accessMode) {
2183 break;
2184 }
2185 }
2186 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2187 /* This request conflicts with an existing open on this client. */
2188 lck_mtx_unlock(&np->n_openlock);
2189 return EACCES;
2190 }
2191 }
2192
2193 /*
2194 * If this open owner doesn't have an open
2195 * file structure yet, we create one for it.
2196 */
2197 if (!nofp && !*nofpp && !newnofp && alloc) {
2198 lck_mtx_unlock(&np->n_openlock);
2199alloc:
2200 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2201 if (!newnofp) {
2202 return ENOMEM;
2203 }
2204 bzero(newnofp, sizeof(*newnofp));
2205 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2206 newnofp->nof_owner = noop;
2207 nfs_open_owner_ref(noop);
2208 newnofp->nof_np = np;
2209 lck_mtx_lock(&noop->noo_lock);
2210 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2211 lck_mtx_unlock(&noop->noo_lock);
2212 if (np) {
2213 goto tryagain;
2214 }
2215 }
2216 if (!nofp) {
2217 if (*nofpp) {
2218 (*nofpp)->nof_np = np;
2219 nofp = *nofpp;
2220 } else {
2221 nofp = newnofp;
2222 }
2223 if (nofp && np) {
2224 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2225 }
2226 }
2227 if (np) {
2228 lck_mtx_unlock(&np->n_openlock);
2229 }
2230
2231 if (alloc && newnofp && (nofp != newnofp)) {
2232 nfs_open_file_destroy(newnofp);
2233 }
2234
2235 *nofpp = nofp;
2236 return nofp ? 0 : ESRCH;
2237}
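/*
 * A create-path sketch of the provisional-nofp usage described above:
 *
 *	nofp = NULL;
 *	nfs_open_file_find(NULL, noop, &nofp, 0, 0, 1);	/* nodeless nofp */
 *	... create the file, yielding np ...
 *	nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
 *
 * The second call links the provisional nofp to the newly created node.
 */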
2238
2239/*
2240 * Destroy an open file structure.
2241 */
2242void
2243nfs_open_file_destroy(struct nfs_open_file *nofp)
2244{
2245 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2246 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2247 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2248 nfs_open_owner_rele(nofp->nof_owner);
2249 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2250 FREE(nofp, M_TEMP);
2251}
2252
2253/*
2254 * Mark an open file as busy because we are about to
2255 * start an operation that uses and updates open file state.
2256 */
2257int
2258nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2259{
2260 struct nfsmount *nmp;
2261 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2262 int error = 0, slpflag;
2263
2264 nmp = nofp->nof_owner->noo_mount;
2265 if (nfs_mount_gone(nmp)) {
2266 return ENXIO;
2267 }
2268 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2269
2270 lck_mtx_lock(&nofp->nof_lock);
2271 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2272 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2273 break;
2274 }
2275 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2276 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2277 slpflag = 0;
2278 }
2279 if (!error) {
2280 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2281 }
2282 lck_mtx_unlock(&nofp->nof_lock);
2283
2284 return error;
2285}
2286
2287/*
2288 * Clear the busy flag on an open file and wake up anyone waiting
2289 * to mark it busy.
2290 */
2291void
2292nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2293{
2294 int wanted;
2295
2296 lck_mtx_lock(&nofp->nof_lock);
2297 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2298 panic("nfs_open_file_clear_busy");
2299 }
2300 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2301 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2302 lck_mtx_unlock(&nofp->nof_lock);
2303 if (wanted) {
2304 wakeup(nofp);
2305 }
2306}
2307
2308/*
2309 * Add the open state for the given access/deny modes to this open file.
2310 */
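/*
 * The nof_* counters form an access x deny x delegation matrix:
 * access (r, w, rw) crossed with deny mode (none, "_dw" deny-write,
 * "_drw" deny-both), with a "d_" prefix for opens granted under a
 * delegation -- e.g. nof_d_rw_dw counts delegated read/write opens
 * that deny write.
 */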
2311void
2312nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2313{
2314 lck_mtx_lock(&nofp->nof_lock);
2315 nofp->nof_access |= accessMode;
2316 nofp->nof_deny |= denyMode;
2317
2318 if (delegated) {
2319 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2320 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2321 nofp->nof_d_r++;
2322 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2323 nofp->nof_d_w++;
2324 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2325 nofp->nof_d_rw++;
2326 }
2327 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2328 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2329 nofp->nof_d_r_dw++;
2330 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2331 nofp->nof_d_w_dw++;
2332 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2333 nofp->nof_d_rw_dw++;
2334 }
2335 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2336 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2337 nofp->nof_d_r_drw++;
2338 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2339 nofp->nof_d_w_drw++;
2340 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2341 nofp->nof_d_rw_drw++;
2342 }
2343 }
2344 } else {
2345 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2346 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2347 nofp->nof_r++;
2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2349 nofp->nof_w++;
2350 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2351 nofp->nof_rw++;
2352 }
2353 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2354 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2355 nofp->nof_r_dw++;
2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2357 nofp->nof_w_dw++;
2358 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2359 nofp->nof_rw_dw++;
2360 }
2361 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2362 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2363 nofp->nof_r_drw++;
2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2365 nofp->nof_w_drw++;
2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2367 nofp->nof_rw_drw++;
2368 }
2369 }
2370 }
2371
2372 nofp->nof_opencnt++;
2373 lck_mtx_unlock(&nofp->nof_lock);
2374}
2375
2376/*
2377 * Find which particular open combo will be closed and report what
2378 * the new modes will be and whether the open was delegated.
2379 */
2380void
2381nfs_open_file_remove_open_find(
2382 struct nfs_open_file *nofp,
2383 uint32_t accessMode,
2384 uint32_t denyMode,
2385 uint32_t *newAccessMode,
2386 uint32_t *newDenyMode,
2387 int *delegated)
2388{
2389 /*
2390 * Calculate the new modes: a mode bit is dropped when the counters
2391 * contributing that bit sum to exactly one (i.e. the open being removed).
2392 */
2393 *newAccessMode = nofp->nof_access;
2394 *newDenyMode = nofp->nof_deny;
2395
2396 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2397 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2398 ((nofp->nof_r + nofp->nof_d_r +
2399 nofp->nof_rw + nofp->nof_d_rw +
2400 nofp->nof_r_dw + nofp->nof_d_r_dw +
2401 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2402 nofp->nof_r_drw + nofp->nof_d_r_drw +
2403 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2404 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2405 }
2406 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2407 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2408 ((nofp->nof_w + nofp->nof_d_w +
2409 nofp->nof_rw + nofp->nof_d_rw +
2410 nofp->nof_w_dw + nofp->nof_d_w_dw +
2411 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2412 nofp->nof_w_drw + nofp->nof_d_w_drw +
2413 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2414 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2415 }
2416 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2417 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2418 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2419 nofp->nof_w_drw + nofp->nof_d_w_drw +
2420 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2421 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2422 }
2423 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2424 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2425 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2426 nofp->nof_w_drw + nofp->nof_d_w_drw +
2427 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2428 nofp->nof_r_dw + nofp->nof_d_r_dw +
2429 nofp->nof_w_dw + nofp->nof_d_w_dw +
2430 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2431 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2432 }
2433
2434 /* Find the corresponding open access/deny mode counter. */
2435 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2436 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2437 *delegated = (nofp->nof_d_r != 0);
2438 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2439 *delegated = (nofp->nof_d_w != 0);
2440 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2441 *delegated = (nofp->nof_d_rw != 0);
2442 } else {
2443 *delegated = 0;
2444 }
2445 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2446 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2447 *delegated = (nofp->nof_d_r_dw != 0);
2448 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2449 *delegated = (nofp->nof_d_w_dw != 0);
2450 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2451 *delegated = (nofp->nof_d_rw_dw != 0);
2452 } else {
2453 *delegated = 0;
2454 }
2455 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2456 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2457 *delegated = (nofp->nof_d_r_drw != 0);
2458 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2459 *delegated = (nofp->nof_d_w_drw != 0);
2460 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2461 *delegated = (nofp->nof_d_rw_drw != 0);
2462 } else {
2463 *delegated = 0;
2464 }
2465 }
2466}
2467
2468/*
2469 * Remove the open state for the given access/deny modes from this open file.
2470 */
2471void
2472nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2473{
2474 uint32_t newAccessMode, newDenyMode;
2475 int delegated = 0;
2476
2477 lck_mtx_lock(&nofp->nof_lock);
2478 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2479
2480 /* Decrement the corresponding open access/deny mode counter. */
2481 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2482 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2483 if (delegated) {
2484 if (nofp->nof_d_r == 0) {
2485 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2486 } else {
2487 nofp->nof_d_r--;
2488 }
2489 } else {
2490 if (nofp->nof_r == 0) {
2491 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2492 } else {
2493 nofp->nof_r--;
2494 }
2495 }
2496 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2497 if (delegated) {
2498 if (nofp->nof_d_w == 0) {
2499 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2500 } else {
2501 nofp->nof_d_w--;
2502 }
2503 } else {
2504 if (nofp->nof_w == 0) {
2505 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2506 } else {
2507 nofp->nof_w--;
2508 }
2509 }
2510 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2511 if (delegated) {
2512 if (nofp->nof_d_rw == 0) {
2513 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2514 } else {
2515 nofp->nof_d_rw--;
2516 }
2517 } else {
2518 if (nofp->nof_rw == 0) {
2519 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2520 } else {
2521 nofp->nof_rw--;
2522 }
2523 }
2524 }
2525 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2526 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2527 if (delegated) {
2528 if (nofp->nof_d_r_dw == 0) {
2529 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2530 } else {
2531 nofp->nof_d_r_dw--;
2532 }
2533 } else {
2534 if (nofp->nof_r_dw == 0) {
2535 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2536 } else {
2537 nofp->nof_r_dw--;
2538 }
2539 }
2540 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2541 if (delegated) {
2542 if (nofp->nof_d_w_dw == 0) {
2543 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2544 } else {
2545 nofp->nof_d_w_dw--;
2546 }
2547 } else {
2548 if (nofp->nof_w_dw == 0) {
2549 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2550 } else {
2551 nofp->nof_w_dw--;
2552 }
2553 }
2554 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2555 if (delegated) {
2556 if (nofp->nof_d_rw_dw == 0) {
2557 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2558 } else {
2559 nofp->nof_d_rw_dw--;
2560 }
2561 } else {
2562 if (nofp->nof_rw_dw == 0) {
2563 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2564 } else {
2565 nofp->nof_rw_dw--;
2566 }
2567 }
2568 }
2569 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2570 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2571 if (delegated) {
2572 if (nofp->nof_d_r_drw == 0) {
2573 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2574 } else {
2575 nofp->nof_d_r_drw--;
2576 }
2577 } else {
2578 if (nofp->nof_r_drw == 0) {
2579 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2580 } else {
2581 nofp->nof_r_drw--;
2582 }
2583 }
2584 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2585 if (delegated) {
2586 if (nofp->nof_d_w_drw == 0) {
2587 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2588 } else {
2589 nofp->nof_d_w_drw--;
2590 }
2591 } else {
2592 if (nofp->nof_w_drw == 0) {
2593 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2594 } else {
2595 nofp->nof_w_drw--;
2596 }
2597 }
2598 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2599 if (delegated) {
2600 if (nofp->nof_d_rw_drw == 0) {
2601 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2602 } else {
2603 nofp->nof_d_rw_drw--;
2604 }
2605 } else {
2606 if (nofp->nof_rw_drw == 0) {
2607 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2608 } else {
2609 nofp->nof_rw_drw--;
2610 }
2611 }
2612 }
2613 }
2614
2615 /* update the modes */
2616 nofp->nof_access = newAccessMode;
2617 nofp->nof_deny = newDenyMode;
2618 nofp->nof_opencnt--;
2619 lck_mtx_unlock(&nofp->nof_lock);
2620}
2621
2622#if CONFIG_NFS4
2623/*
2624 * Get the current (delegation, lock, open, default) stateid for this node.
2625 * If node has a delegation, use that stateid.
2626 * If pid has a lock, use the lockowner's stateid.
2627 * Or use the open file's stateid.
2628 * If no open file, use a default stateid of all ones.
2629 */
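/*
 * (The all-ones default is one of NFSv4's reserved "special"
 * stateids: per RFC 3530 it asks the server to bypass the usual
 * stateid checks, e.g. for a READ without a regular open.)
 */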
2630void
2631nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2632{
2633 struct nfsmount *nmp = NFSTONMP(np);
2634 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2635 struct nfs_open_owner *noop = NULL;
2636 struct nfs_open_file *nofp = NULL;
2637 struct nfs_lock_owner *nlop = NULL;
2638 nfs_stateid *s = NULL;
2639
2640 if (np->n_openflags & N_DELEG_MASK) {
2641 s = &np->n_dstateid;
2642 } else {
2643 if (p) {
2644 nlop = nfs_lock_owner_find(np, p, 0);
2645 }
2646 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2647 /* we hold locks, use lock stateid */
2648 s = &nlop->nlo_stateid;
2649 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2650 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2651 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2652 nofp->nof_access) {
2653 /* we (should) have the file open, use open stateid */
2654 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2655 nfs4_reopen(nofp, thd);
2656 }
2657 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2658 s = &nofp->nof_stateid;
2659 }
2660 }
2661 }
2662
2663 if (s) {
2664 sid->seqid = s->seqid;
2665 sid->other[0] = s->other[0];
2666 sid->other[1] = s->other[1];
2667 sid->other[2] = s->other[2];
2668 } else {
2669 /* named attributes may not have a stateid for reads, so don't complain for them */
2670 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
2671 NP(np, "nfs_get_stateid: no stateid");
2672 }
2673 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2674 }
2675 if (nlop) {
2676 nfs_lock_owner_rele(nlop);
2677 }
2678 if (noop) {
2679 nfs_open_owner_rele(noop);
2680 }
2681}
2682
2683
2684/*
2685 * When we have a delegation, we may be able to perform the OPEN locally.
2686 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2687 */
2688int
2689nfs4_open_delegated(
2690 nfsnode_t np,
2691 struct nfs_open_file *nofp,
2692 uint32_t accessMode,
2693 uint32_t denyMode,
2694 vfs_context_t ctx)
2695{
2696 int error = 0, ismember, readtoo = 0, authorized = 0;
2697 uint32_t action;
2698 struct kauth_acl_eval eval;
2699 kauth_cred_t cred = vfs_context_ucred(ctx);
2700
2701 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2702 /*
2703 * Try to open it for read access too,
2704 * so the buffer cache can read data.
2705 */
2706 readtoo = 1;
2707 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2708 }
2709
2710tryagain:
2711 action = 0;
2712 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
2713 action |= KAUTH_VNODE_READ_DATA;
2714 }
2715 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
2716 action |= KAUTH_VNODE_WRITE_DATA;
2717 }
2718
2719 /* evaluate ACE (if we have one) */
2720 if (np->n_dace.ace_flags) {
2721 eval.ae_requested = action;
2722 eval.ae_acl = &np->n_dace;
2723 eval.ae_count = 1;
2724 eval.ae_options = 0;
2725 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
2726 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2727 }
2728 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2729 if (!error && ismember) {
2730 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2731 }
2732
2733 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2734 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2735 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2736 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2737
2738 error = kauth_acl_evaluate(cred, &eval);
2739
2740 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
2741 authorized = 1;
2742 }
2743 }
2744
2745 if (!authorized) {
2746 /* need to ask the server via ACCESS */
2747 struct vnop_access_args naa;
2748 naa.a_desc = &vnop_access_desc;
2749 naa.a_vp = NFSTOV(np);
2750 naa.a_action = action;
2751 naa.a_context = ctx;
2752 if (!(error = nfs_vnop_access(&naa))) {
2753 authorized = 1;
2754 }
2755 }
2756
2757 if (!authorized) {
2758 if (readtoo) {
2759 /* try again without the extra read access */
2760 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2761 readtoo = 0;
2762 goto tryagain;
2763 }
2764 return error ? error : EACCES;
2765 }
2766
2767 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2768
2769 return 0;
2770}
2771
2772
2773/*
2774 * Open a file with the given access/deny modes.
2775 *
2776 * If we have a delegation, we may be able to handle the open locally.
2777 * Otherwise, we will always send the open RPC even if this open's mode is
2778 * a subset of all the existing opens. This makes sure that we will always
2779 * be able to do a downgrade to any of the open modes.
2780 *
2781 * Note: local conflicts should have already been checked in nfs_open_file_find().
2782 */
2783int
2784nfs4_open(
2785 nfsnode_t np,
2786 struct nfs_open_file *nofp,
2787 uint32_t accessMode,
2788 uint32_t denyMode,
2789 vfs_context_t ctx)
2790{
2791 vnode_t vp = NFSTOV(np);
2792 vnode_t dvp = NULL;
2793 struct componentname cn;
2794 const char *vname = NULL;
2795 size_t namelen;
2796 char smallname[128];
2797 char *filename = NULL;
2798 int error = 0, readtoo = 0;
2799
2800 /*
2801 * We can handle the OPEN ourselves if we have a delegation,
2802 * unless it's a read delegation and the open is asking for
2803 * either write access or deny read. We also don't bother to
2804 * use the delegation if it's being returned.
2805 */
2806 if (np->n_openflags & N_DELEG_MASK) {
2807 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2808 return error;
2809 }
2810 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2811 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2812 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2813 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2814 nfs_open_state_clear_busy(np);
2815 return error;
2816 }
2817 nfs_open_state_clear_busy(np);
2818 }
2819
2820 /*
2821 * [sigh] We can't trust VFS to get the parent right for named
2822 * attribute nodes. (It likes to reparent the nodes after we've
2823 * created them.) Luckily we can probably get the right parent
2824 * from the n_parent we have stashed away.
2825 */
2826 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2827 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
2828 dvp = NULL;
2829 }
2830 if (!dvp) {
2831 dvp = vnode_getparent(vp);
2832 }
2833 vname = vnode_getname(vp);
2834 if (!dvp || !vname) {
2835 if (!error) {
2836 error = EIO;
2837 }
2838 goto out;
2839 }
2840 filename = &smallname[0];
2841 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2842 if (namelen >= sizeof(smallname)) {
2843 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
2844 if (!filename) {
2845 error = ENOMEM;
2846 goto out;
2847 }
2848 snprintf(filename, namelen + 1, "%s", vname);
2849 }
2850 bzero(&cn, sizeof(cn));
2851 cn.cn_nameptr = filename;
2852 cn.cn_namelen = namelen;
2853
2854 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2855 /*
2856 * Try to open it for read access too,
2857 * so the buffer cache can read data.
2858 */
2859 readtoo = 1;
2860 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2861 }
2862tryagain:
2863 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2864 if (error) {
2865 if (!nfs_mount_state_error_should_restart(error) &&
2866 (error != EINTR) && (error != ERESTART) && readtoo) {
2867 /* try again without the extra read access */
2868 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2869 readtoo = 0;
2870 goto tryagain;
2871 }
2872 goto out;
2873 }
2874 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2875out:
2876 if (filename && (filename != &smallname[0])) {
2877 FREE(filename, M_TEMP);
2878 }
2879 if (vname) {
2880 vnode_putname(vname);
2881 }
2882 if (dvp != NULLVP) {
2883 vnode_put(dvp);
2884 }
2885 return error;
2886}
2887#endif /* CONFIG_NFS4 */
2888
2889int
2890nfs_vnop_mmap(
2891 struct vnop_mmap_args /* {
2892 * struct vnodeop_desc *a_desc;
2893 * vnode_t a_vp;
2894 * int a_fflags;
2895 * vfs_context_t a_context;
2896 * } */*ap)
2897{
2898 vfs_context_t ctx = ap->a_context;
2899 vnode_t vp = ap->a_vp;
2900 nfsnode_t np = VTONFS(vp);
2901 int error = 0, accessMode, denyMode, delegated;
2902 struct nfsmount *nmp;
2903 struct nfs_open_owner *noop = NULL;
2904 struct nfs_open_file *nofp = NULL;
2905
2906 nmp = VTONMP(vp);
2907 if (nfs_mount_gone(nmp)) {
2908 return ENXIO;
2909 }
2910
2911 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2912 return EINVAL;
2913 }
2914 if (np->n_flag & NREVOKE) {
2915 return EIO;
2916 }
2917
2918 /*
2919 * fflags contains some combination of: PROT_READ, PROT_WRITE
2920 * Since it's not possible to mmap() without having the file open for reading,
2921 * read access is always implied (regardless of whether PROT_READ is set).
2922 */
2923 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2924 if (ap->a_fflags & PROT_WRITE) {
2925 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2926 }
2927 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2928
2929 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2930 if (!noop) {
2931 return ENOMEM;
2932 }
2933
2934restart:
2935 error = nfs_mount_state_in_use_start(nmp, NULL);
2936 if (error) {
2937 nfs_open_owner_rele(noop);
2938 return error;
2939 }
2940 if (np->n_flag & NREVOKE) {
2941 error = EIO;
2942 nfs_mount_state_in_use_end(nmp, 0);
2943 nfs_open_owner_rele(noop);
2944 return error;
2945 }
2946
2947 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2948 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2949 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2950 error = EPERM;
2951 }
2952#if CONFIG_NFS4
2953 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2954 nfs_mount_state_in_use_end(nmp, 0);
2955 error = nfs4_reopen(nofp, NULL);
2956 nofp = NULL;
2957 if (!error) {
2958 goto restart;
2959 }
2960 }
2961#endif
2962 if (!error) {
2963 error = nfs_open_file_set_busy(nofp, NULL);
2964 }
2965 if (error) {
2966 nofp = NULL;
2967 goto out;
2968 }
2969
2970 /*
2971 * The open reference for mmap must mirror an existing open because
2972 * we may need to reclaim it after the file is closed.
2973 * So grab another open count matching the accessMode passed in.
2974 * If we already had an mmap open, prefer read/write without deny mode.
2975 * This means we may have to drop the current mmap open first.
2976 *
2977 * N.B. We should have an open for the mmap, because mmap was
2978 * called on an open descriptor, or we created an open for read
2979 * when reading the first page for execve. However, if we
2980 * piggybacked on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2981 * open, that open may have since closed.
2982 */
2983
2984 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2985 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2986 /* We shouldn't get here. We've already opened the file for execve */
2987 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2988 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2989 }
2990 /*
2991 * Mappings for execve are read-only. Fail with EPERM if the accessMode is not ACCESS_READ
2992 * or the access would be denied. Other access modes should have an open descriptor for the mapping.
2993 */
2994 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2995 /* not asking for just read access -> fail */
2996 error = EPERM;
2997 goto out;
2998 }
2999 /* we don't have the file open, so open it for read access */
3000 if (nmp->nm_vers < NFS_VER4) {
3001 /* NFS v2/v3 opens are always allowed - so just add it. */
3002 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
3003 error = 0;
3004 }
3005#if CONFIG_NFS4
3006 else {
3007 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
3008 }
3009#endif
3010 if (!error) {
3011 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
3012 }
3013 if (error) {
3014 goto out;
3015 }
3016 }
3017
3018 /* determine deny mode for open */
3019 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3020 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3021 delegated = 1;
3022 if (nofp->nof_d_rw) {
3023 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3024 } else if (nofp->nof_d_rw_dw) {
3025 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3026 } else if (nofp->nof_d_rw_drw) {
3027 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3028 }
3029 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3030 delegated = 0;
3031 if (nofp->nof_rw) {
3032 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3033 } else if (nofp->nof_rw_dw) {
3034 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3035 } else if (nofp->nof_rw_drw) {
3036 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3037 }
3038 } else {
3039 error = EPERM;
3040 }
3041 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3042 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3043 delegated = 1;
3044 if (nofp->nof_d_r) {
3045 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3046 } else if (nofp->nof_d_r_dw) {
3047 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3048 } else if (nofp->nof_d_r_drw) {
3049 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3050 }
3051 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3052 delegated = 0;
3053 if (nofp->nof_r) {
3054 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3055 } else if (nofp->nof_r_dw) {
3056 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3057 } else if (nofp->nof_r_drw) {
3058 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3059 }
3060 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3061 /*
3062 * This clause and the one below co-opt a read/write access
3063 * for a read-only mapping. We probably got here because an
3064 * existing rw open for an executable file already exists.
3065 */
3066 delegated = 1;
3067 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3068 if (nofp->nof_d_rw) {
3069 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3070 } else if (nofp->nof_d_rw_dw) {
3071 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3072 } else if (nofp->nof_d_rw_drw) {
3073 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3074 }
3075 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3076 delegated = 0;
3077 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3078 if (nofp->nof_rw) {
3079 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3080 } else if (nofp->nof_rw_dw) {
3081 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3082 } else if (nofp->nof_rw_drw) {
3083 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3084 }
3085 } else {
3086 error = EPERM;
3087 }
3088 }
3089 if (error) { /* mmap mode without proper open mode */
3090 goto out;
3091 }
3092
3093 /*
3094 * If the existing mmap access is more than the new access OR the
3095 * existing access is the same and the existing deny mode is no stronger,
3096 * then we'll stick with the existing mmap open mode.
3097 */
3098 if ((nofp->nof_mmap_access > accessMode) ||
3099 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
3100 goto out;
3101 }
3102
3103 /* update mmap open mode */
3104 if (nofp->nof_mmap_access) {
3105 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3106 if (error) {
3107 if (!nfs_mount_state_error_should_restart(error)) {
3108 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3109 }
3110 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3111 goto out;
3112 }
3113 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3114 }
3115
3116 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3117 nofp->nof_mmap_access = accessMode;
3118 nofp->nof_mmap_deny = denyMode;
3119
3120out:
3121 if (nofp) {
3122 nfs_open_file_clear_busy(nofp);
3123 }
3124 if (nfs_mount_state_in_use_end(nmp, error)) {
3125 nofp = NULL;
3126 goto restart;
3127 }
3128 if (noop) {
3129 nfs_open_owner_rele(noop);
3130 }
3131
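/*
 * First successful map of this node: mark it NISMAPPED and count it
 * on the mount; mapped files reset the dead-mount timers and clear
 * the "squishy" state so the mount isn't torn down beneath an active
 * mapping.
 */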
3132 if (!error) {
3133 int ismapped = 0;
3134 nfs_node_lock_force(np);
3135 if ((np->n_flag & NISMAPPED) == 0) {
3136 np->n_flag |= NISMAPPED;
3137 ismapped = 1;
3138 }
3139 nfs_node_unlock(np);
3140 if (ismapped) {
3141 lck_mtx_lock(&nmp->nm_lock);
3142 nmp->nm_state &= ~NFSSTA_SQUISHY;
3143 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
3144 if (nmp->nm_curdeadtimeout <= 0) {
3145 nmp->nm_deadto_start = 0;
3146 }
3147 nmp->nm_mappers++;
3148 lck_mtx_unlock(&nmp->nm_lock);
3149 }
3150 }
3151
3152 return error;
3153}
3154
3155
3156int
3157nfs_vnop_mnomap(
3158 struct vnop_mnomap_args /* {
3159 * struct vnodeop_desc *a_desc;
3160 * vnode_t a_vp;
3161 * vfs_context_t a_context;
3162 * } */*ap)
3163{
3164 vfs_context_t ctx = ap->a_context;
3165 vnode_t vp = ap->a_vp;
3166 nfsnode_t np = VTONFS(vp);
3167 struct nfsmount *nmp;
3168 struct nfs_open_file *nofp = NULL;
3169 off_t size;
3170 int error;
3171 int is_mapped_flag = 0;
3172
3173 nmp = VTONMP(vp);
3174 if (nfs_mount_gone(nmp)) {
3175 return ENXIO;
3176 }
3177
3178 nfs_node_lock_force(np);
3179 if (np->n_flag & NISMAPPED) {
3180 is_mapped_flag = 1;
3181 np->n_flag &= ~NISMAPPED;
3182 }
3183 nfs_node_unlock(np);
3184 if (is_mapped_flag) {
3185 lck_mtx_lock(&nmp->nm_lock);
3186 if (nmp->nm_mappers) {
3187 nmp->nm_mappers--;
3188 } else {
3189 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3190 }
3191 lck_mtx_unlock(&nmp->nm_lock);
3192 }
3193
3194 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3195 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
3196 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
3197 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
3198 }
3199
3200 /* walk all open files and close all mmap opens */
3201loop:
3202 error = nfs_mount_state_in_use_start(nmp, NULL);
3203 if (error) {
3204 return error;
3205 }
3206 lck_mtx_lock(&np->n_openlock);
3207 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
3208 if (!nofp->nof_mmap_access) {
3209 continue;
3210 }
3211 lck_mtx_unlock(&np->n_openlock);
3212#if CONFIG_NFS4
3213 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3214 nfs_mount_state_in_use_end(nmp, 0);
3215 error = nfs4_reopen(nofp, NULL);
3216 if (!error) {
3217 goto loop;
3218 }
3219 }
3220#endif
3221 if (!error) {
3222 error = nfs_open_file_set_busy(nofp, NULL);
3223 }
3224 if (error) {
3225 lck_mtx_lock(&np->n_openlock);
3226 break;
3227 }
3228 if (nofp->nof_mmap_access) {
3229 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3230 if (!nfs_mount_state_error_should_restart(error)) {
3231 if (error) { /* not a state-operation-restarting error, so just clear the access */
3232 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3233 }
3234 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3235 }
3236 if (error) {
3237 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3238 }
3239 }
3240 nfs_open_file_clear_busy(nofp);
3241 nfs_mount_state_in_use_end(nmp, error);
3242 goto loop;
3243 }
3244 lck_mtx_unlock(&np->n_openlock);
3245 nfs_mount_state_in_use_end(nmp, error);
3246 return error;
3247}
3248
3249/*
3250 * Search a node's lock owner list for the owner for this process.
3251 * If not found and "alloc" is set, then allocate a new one.
3252 */
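/*
 * Lock owners are matched on (pid, process start time) rather than
 * pid alone, so a recycled pid does not inherit a stale owner; stale
 * entries are unlinked (or reset and reused) below.
 */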
3253struct nfs_lock_owner *
3254nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3255{
3256 pid_t pid = proc_pid(p);
3257 struct nfs_lock_owner *nlop, *newnlop = NULL;
3258
3259tryagain:
3260 lck_mtx_lock(&np->n_openlock);
3261 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3262 os_ref_count_t newcount;
3263
3264 if (nlop->nlo_pid != pid) {
3265 continue;
3266 }
3267 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
3268 break;
3269 }
3270 /* stale lock owner... reuse it if we can */
3271 if (os_ref_get_count(&nlop->nlo_refcnt)) {
3272 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3273 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3274 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3275 lck_mtx_unlock(&np->n_openlock);
3276 goto tryagain;
3277 }
3278 nlop->nlo_pid_start = p->p_start;
3279 nlop->nlo_seqid = 0;
3280 nlop->nlo_stategenid = 0;
3281 break;
3282 }
3283
3284 if (!nlop && !newnlop && alloc) {
3285 lck_mtx_unlock(&np->n_openlock);
3286 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3287 if (!newnlop) {
3288 return NULL;
3289 }
3290 bzero(newnlop, sizeof(*newnlop));
3291 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3292 newnlop->nlo_pid = pid;
3293 newnlop->nlo_pid_start = p->p_start;
3294 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3295 TAILQ_INIT(&newnlop->nlo_locks);
3296 goto tryagain;
3297 }
3298 if (!nlop && newnlop) {
3299 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3300 os_ref_init(&newnlop->nlo_refcnt, NULL);
3301 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3302 nlop = newnlop;
3303 }
3304 lck_mtx_unlock(&np->n_openlock);
3305
3306 if (newnlop && (nlop != newnlop)) {
3307 nfs_lock_owner_destroy(newnlop);
3308 }
3309
3310 if (nlop) {
3311 nfs_lock_owner_ref(nlop);
3312 }
3313
3314 return nlop;
3315}
3316
3317/*
3318 * destroy a lock owner that's no longer needed
3319 */
3320void
3321nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3322{
3323 if (nlop->nlo_open_owner) {
3324 nfs_open_owner_rele(nlop->nlo_open_owner);
3325 nlop->nlo_open_owner = NULL;
3326 }
3327 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3328 FREE(nlop, M_TEMP);
3329}
3330
3331/*
3332 * acquire a reference count on a lock owner
3333 */
3334void
3335nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3336{
3337 lck_mtx_lock(&nlop->nlo_lock);
3338 os_ref_retain_locked(&nlop->nlo_refcnt);
3339 lck_mtx_unlock(&nlop->nlo_lock);
3340}
3341
3342/*
3343 * drop a reference count on a lock owner and destroy it if
3344 * it is no longer referenced and no longer on the mount's list.
3345 */
3346void
3347nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3348{
3349 os_ref_count_t newcount;
3350
3351 lck_mtx_lock(&nlop->nlo_lock);
3352 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
3353 panic("nfs_lock_owner_rele: no refcnt");
3354 }
3355 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3356 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3357 panic("nfs_lock_owner_rele: busy");
3358 }
3359 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3360 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3361 lck_mtx_unlock(&nlop->nlo_lock);
3362 return;
3363 }
3364 /* owner is no longer referenced or linked to mount, so destroy it */
3365 lck_mtx_unlock(&nlop->nlo_lock);
3366 nfs_lock_owner_destroy(nlop);
3367}
3368
3369/*
3370 * Mark a lock owner as busy because we are about to
3371 * start an operation that uses and updates lock owner state.
3372 */
3373int
3374nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3375{
3376 struct nfsmount *nmp;
3377 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3378 int error = 0, slpflag;
3379
3380 nmp = nlop->nlo_open_owner->noo_mount;
3381 if (nfs_mount_gone(nmp)) {
3382 return ENXIO;
3383 }
3384 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3385
3386 lck_mtx_lock(&nlop->nlo_lock);
3387 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3388 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
3389 break;
3390 }
3391 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3392 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3393 slpflag = 0;
3394 }
3395 if (!error) {
3396 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3397 }
3398 lck_mtx_unlock(&nlop->nlo_lock);
3399
3400 return error;
3401}
3402
3403/*
3404 * Clear the busy flag on a lock owner and wake up anyone waiting
3405 * to mark it busy.
3406 */
3407void
3408nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3409{
3410 int wanted;
3411
3412 lck_mtx_lock(&nlop->nlo_lock);
3413 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3414 panic("nfs_lock_owner_clear_busy");
3415 }
3416 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3417 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3418 lck_mtx_unlock(&nlop->nlo_lock);
3419 if (wanted) {
3420 wakeup(nlop);
3421 }
3422}
3423
3424/*
3425 * Insert a held lock into a lock owner's sorted list.
3426 * (flock locks are always inserted at the head of the list)
3427 */
3428void
3429nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3430{
3431 struct nfs_file_lock *nflp;
3432
3433 /* insert new lock in lock owner's held lock list */
3434 lck_mtx_lock(&nlop->nlo_lock);
3435 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3436 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3437 } else {
3438 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3439 if (newnflp->nfl_start < nflp->nfl_start) {
3440 break;
3441 }
3442 }
3443 if (nflp) {
3444 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3445 } else {
3446 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3447 }
3448 }
3449 lck_mtx_unlock(&nlop->nlo_lock);
3450}
3451
3452/*
3453 * Get a file lock structure for this lock owner.
3454 */
3455struct nfs_file_lock *
3456nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3457{
3458 struct nfs_file_lock *nflp = NULL;
3459
3460 lck_mtx_lock(&nlop->nlo_lock);
3461 if (!nlop->nlo_alock.nfl_owner) {
3462 nflp = &nlop->nlo_alock;
3463 nflp->nfl_owner = nlop;
3464 }
3465 lck_mtx_unlock(&nlop->nlo_lock);
3466 if (!nflp) {
3467 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3468 if (!nflp) {
3469 return NULL;
3470 }
3471 bzero(nflp, sizeof(*nflp));
3472 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3473 nflp->nfl_owner = nlop;
3474 }
3475 nfs_lock_owner_ref(nlop);
3476 return nflp;
3477}
3478
3479/*
3480 * destroy the given NFS file lock structure
3481 */
3482void
3483nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3484{
3485 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3486
3487 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3488 nflp->nfl_owner = NULL;
3489 FREE(nflp, M_TEMP);
3490 } else {
3491 lck_mtx_lock(&nlop->nlo_lock);
3492 bzero(nflp, sizeof(*nflp));
3493 lck_mtx_unlock(&nlop->nlo_lock);
3494 }
3495 nfs_lock_owner_rele(nlop);
3496}
3497
3498/*
3499 * Check if one file lock conflicts with another.
3500 * (nflp1 is the new lock. nflp2 is the existing lock.)
3501 */
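/*
 * (E.g. two overlapping read locks from different owners do not
 * conflict, but an overlapping F_WRLCK conflicts with any lock from
 * another owner. "willsplit" reports when the new lock of a different
 * type lands strictly inside one of our own, i.e. the old lock would
 * have to be split.)
 */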
3502int
3503nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3504{
3505 /* no conflict if either lock is dead */
3506 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3507 return 0;
3508 }
3509 /* no conflict if it's ours - unless the lock style doesn't match */
3510 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3511 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3512 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3513 (nflp1->nfl_start > nflp2->nfl_start) &&
3514 (nflp1->nfl_end < nflp2->nfl_end)) {
3515 *willsplit = 1;
3516 }
3517 return 0;
3518 }
3519 /* no conflict if ranges don't overlap */
3520 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3521 return 0;
3522 }
3523 /* no conflict if neither lock is exclusive */
3524 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3525 return 0;
3526 }
3527 /* conflict */
3528 return 1;
3529}

#if CONFIG_NFS4
/*
 * Send an NFSv4 LOCK RPC to the server.
 */
int
nfs4_setlock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITEW :
	    NFS_LOCK_TYPE_READW) :
	    ((nflp->nfl_type == F_WRLCK) ?
	    NFS_LOCK_TYPE_WRITE :
	    NFS_LOCK_TYPE_READ);
	if (newlocker) {
		error = nfs_open_file_set_busy(nofp, thd);
		if (error) {
			return error;
		}
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return error;
		}
		if (!nlop->nlo_open_owner) {
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
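	/*
	 * The locker4 argument differs for a new lock owner vs. an established
	 * one (open_to_lock_owner4 vs. exist_lock_owner4 in RFC 7530): a new
	 * locker presents the open owner's seqid and open stateid plus its own
	 * lock seqid and lock owner, while an established locker just presents
	 * its current lock stateid and seqid.
	 */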
	if (newlocker) {
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/*
	 * Update the lock owner's stategenid once it appears the server has
	 * state for it.  We determine this by noting the request was
	 * successful (we got a stateid).
	 */
	if (newlocker && !error) {
		nlop->nlo_stategenid = nmp->nm_stategenid;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

/*
 * Send an NFSv4 LOCKU RPC to the server.
 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	int flags,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	error = nfs_lock_owner_set_busy(nlop, NULL);
	if (error) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

/*
 * Send an NFSv4 LOCKT RPC to the server.
 */
int
nfs4_getlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid, val64 = 0;
	uint32_t val = 0;
	int error = 0, lockerror, numops, status;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	lockerror = ENOENT;
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKT
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
	nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
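	/*
	 * For LOCKT, NFSERR_DENIED is the interesting answer rather than a
	 * failure: the reply carries the conflicting lock's offset, length,
	 * and type, which we translate back into the caller's struct flock.
	 * A length of UINT64_MAX means "to end of file", which struct flock
	 * expresses as l_len == 0.
	 */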
	if (error == NFSERR_DENIED) {
		error = 0;
		nfsm_chain_get_64(error, &nmrep, fl->l_start);
		nfsm_chain_get_64(error, &nmrep, val64);
		fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
		nfsm_chain_get_32(error, &nmrep, val);
		fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
		fl->l_pid = 0;
		fl->l_whence = SEEK_SET;
	} else if (!error) {
		fl->l_type = F_UNLCK;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
#endif /* CONFIG_NFS4 */

/*
 * Check for any conflicts with the given lock.
 *
 * Checking for a lock doesn't require the file to be opened.
 * So we skip all the open owner, open file, lock owner work
 * and just check for a conflicting lock.
 */
int
nfs_advlock_getlock(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp;
	int error = 0, answered = 0;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	/* scan currently held locks for conflict */
	TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
		    ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
			break;
		}
	}
	if (nflp) {
		/* found a conflicting lock */
		fl->l_type = nflp->nfl_type;
		fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
		fl->l_start = nflp->nfl_start;
		fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
		fl->l_whence = SEEK_SET;
		answered = 1;
	} else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
		/*
		 * If we have a write delegation, we know there can't be other
		 * locks on the server.  So the answer is no conflicting lock found.
		 */
		fl->l_type = F_UNLCK;
		answered = 1;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (answered) {
		nfs_mount_state_in_use_end(nmp, 0);
		return 0;
	}

	/* no conflict found locally, so ask the server */
	error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);

	if (nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;
	}
	return error;
}

/*
 * Acquire a file lock for the given range.
 *
 * Add the lock (request) to the lock queue.
 * Scan the lock queue for any conflicting locks.
 * If a conflict is found, block or return an error.
 * Once end of queue is reached, send request to the server.
 * If the server grants the lock, scan the lock queue and
 * update any existing locks.  Then (optionally) scan the
 * queue again to coalesce any locks adjacent to the new one.
 */
int
nfs_advlock_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	if ((type != F_RDLCK) && (type != F_WRLCK)) {
		return EINVAL;
	}

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp) {
		return ENOLCK;
	}
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	}
	newnflp->nfl_flags |= style;
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock.  So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
			nflp = NULL;
		}
		if (nflp && (nflp->nfl_type != F_RDLCK)) {
			nflp = NULL;
		}
		flocknflp = nflp;
	}

restart:
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		goto error_out;
	}
	inuse = 1;
	if (np->n_flag & NREVOKE) {
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		goto error_out;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		if (error) {
			goto error_out;
		}
		goto restart;
	}
#endif

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
		nextnflp = TAILQ_NEXT(nflp, nfl_link);
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
			continue;
		}
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error) {
					error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
				}
				if (error) {
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
					break;
				}
			}
			msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
			slpflag = 0;
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
			if (!error && (np->n_flag & NREVOKE)) {
				error = EIO;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		if (error || restart) {
			break;
		}
		/*
		 * We have released n_openlock, so we can't trust that nextnflp
		 * is still valid; start this lock-scanning loop over from the
		 * beginning of the list.
		 */
		nextnflp = TAILQ_NEXT(newnflp, nfl_link);
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart) {
		goto restart;
	}
	if (error) {
		goto error_out;
	}

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		goto error_out;
	}
	busy = 1;
	delay = 0;
	do {
#if CONFIG_NFS4
		/* do we have a delegation? (that we're not returning?) */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
			if (np->n_openflags & N_DELEG_WRITE) {
				/* with a write delegation, just take the lock delegated */
				newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
				error = 0;
				/* make sure the lock owner knows its open owner */
				if (!nlop->nlo_open_owner) {
					nfs_open_owner_ref(nofp->nof_owner);
					nlop->nlo_open_owner = nofp->nof_owner;
				}
				break;
			} else {
				/*
				 * If we don't have any non-delegated opens but we do have
				 * delegated opens, then we need to first claim the delegated
				 * opens so that the lock request on the server can be associated
				 * with an open it knows about.
				 */
				if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
				    !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
				    !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
				    (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
				    nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
				    nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
					error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
					if (error) {
						break;
					}
				}
			}
		}
#endif
		if (np->n_flag & NREVOKE) {
			error = EIO;
		}
		if (!error) {
			error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
			break;
		}
		/* request was denied due to either conflict or grace period */
		if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2) {
				error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
			}
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/*
		 * Wait a little bit and send the request again.
		 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
		 */
		if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
			if (error == NFSERR_GRACE) {
				delay = 4;
			}
			if (delay < 4) {
				delay++;
			}
			tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
			slpflag = 0;
		}
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			goto restart;
		}
		if (!error && (np->n_flag & NREVOKE)) {
			error = EIO;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			if (inqueue) {
				TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			}
			nfs_file_lock_destroy(newnflp);
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy) {
			nfs_open_state_clear_busy(np);
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
		}
		if (nflp2) {
			nfs_file_lock_destroy(nflp2);
		}
		return error;
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
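	/*
	 * For example (hypothetical ranges): if this owner held a read lock on
	 * [0,100] and the new lock is a write lock on [40,60], the old lock is
	 * split into [0,39] and [61,100] with the new write lock in between;
	 * a new lock covering [0,200] would kill the old lock entirely.
	 */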
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp) {
			continue;
		}
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
			continue;
		}
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/*
			 * We're replacing a range in the middle of a lock.
			 * The current lock will be split into two locks.
			 * Update locks and insert new lock after current lock.
			 */
			nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock.  Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue.  This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
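		/*
		 * e.g. if this owner holds read locks on [0,49] and [50,99]
		 * (hypothetical ranges), the two entries coalesce into a
		 * single [0,99] lock; the absorbed entry is marked DEAD
		 * rather than freed immediately so that any waiters blocked
		 * on it get woken and continue their scan.
		 */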
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
				continue;
			}
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(nflp);
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, error);

	if (nflp2) {
		nfs_file_lock_destroy(nflp2);
	}
	return error;
}

/*
 * Release all (same style) locks within the given range.
 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp
#if !CONFIG_NFS4
	__unused
#endif
	,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
		return error;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error) {
			return error;
		}
		goto restart;
	}
#endif
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
				continue;
			}
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
				continue;
			}
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp) {
				return ENOMEM;
			}
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs.  The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us.  However, other entries in the
	 * list may be removed.  So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
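	/*
	 * For example (hypothetical ranges): if this owner holds a whole-file
	 * flock-style lock plus POSIX locks on [100,199] and [500,599],
	 * releasing the flock lock sends LOCKUs for [0,99], [200,499], and
	 * [600,EOF] so the POSIX ranges stay locked on the server.
	 */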
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
		send_unlock_rpcs = 0;
	}
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error) {
					goto out;
				}
				s = nflp->nfl_end + 1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
			    vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error) {
				goto out;
			}
		}
		send_unlock_rpcs = 0;
	}

	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
			continue;
		}
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/*
			 * We're unlocking a range in the middle of a lock.
			 * The current lock will be split into two locks.
			 */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error) {
				break;
			}
			/* update locks and insert new lock after current lock */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	if (newnflp) {
		nfs_file_lock_destroy(newnflp);
	}
	return error;
}

/*
 * NFS advisory file locking
 * (used for v4 as well as v2/v3 mounts whose lock mode permits locking)
 */
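
/*
 * A sketch of how this vnop is typically reached (hypothetical userland
 * caller; fd is an open file on an NFS mount):
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);	// POSIX byte-range lock -> op F_SETLKW
 *	flock(fd, LOCK_SH);		// whole-file lock -> F_FLOCK in a_flags
 *
 * POSIX (fcntl) requests keep their byte range; flock(2) requests must
 * cover the whole file (start 0 through UINT64_MAX) and use the flock
 * lock style, as enforced below.
 */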
int
nfs_vnop_advlock(
	struct vnop_advlock_args /* {
	                          *  struct vnodeop_desc *a_desc;
	                          *  vnode_t a_vp;
	                          *  caddr_t a_id;
	                          *  int a_op;
	                          *  struct flock *a_fl;
	                          *  int a_flags;
	                          *  vfs_context_t a_context;
	                          *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct flock *fl = ap->a_fl;
	int op = ap->a_op;
	int flags = ap->a_flags;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	off_t lstart;
	uint64_t start, end;
	int error = 0, modified, style;
	enum vtype vtype;
#define OFF_MAX QUAD_MAX

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENOTSUP;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (np->n_flag & NREVOKE) {
		return EIO;
	}
	vtype = vnode_vtype(ap->a_vp);
	if (vtype == VDIR) { /* ignore lock requests on directories */
		return 0;
	}
	if (vtype != VREG) { /* anything other than regular files is invalid */
		return EINVAL;
	}

	/* Convert the flock structure into a start and end. */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * to fl->l_start when SEEK_CUR is used.
		 */
		lstart = fl->l_start;
		break;
	case SEEK_END:
		/*
		 * Need to flush, and refetch attributes to make
		 * sure we have the correct end-of-file offset.
		 */
		if ((error = nfs_node_lock(np))) {
			return error;
		}
		modified = (np->n_flag & NMODIFIED);
		nfs_node_unlock(np);
		if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
			return error;
		}
		if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
			return error;
		}
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		if ((np->n_size > OFF_MAX) ||
		    ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
			error = EOVERFLOW;
		}
		lstart = np->n_size + fl->l_start;
		nfs_data_unlock(np);
		if (error) {
			return error;
		}
		break;
	default:
		return EINVAL;
	}
	if (lstart < 0) {
		return EINVAL;
	}
	start = lstart;
	if (fl->l_len == 0) {
		end = UINT64_MAX;
	} else if (fl->l_len > 0) {
		if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
			return EOVERFLOW;
		}
		end = start - 1 + fl->l_len;
	} else { /* l_len is negative */
		if ((lstart + fl->l_len) < 0) {
			return EINVAL;
		}
		end = start - 1;
		start += fl->l_len;
	}
	if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
		return EINVAL;
	}

	style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
		return EINVAL;
	}

	/* find the lock owner, allocating one if this isn't an unlock request */
	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
	if (!nlop) {
		error = (op == F_UNLCK) ? 0 : ENOMEM;
		if (error) {
			NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
		}
		goto out;
	}

	if (op == F_GETLK) {
		error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
	} else {
		/* find the open owner */
		noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
		if (!noop) {
			NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
			error = EPERM;
			goto out;
		}
		/* find the open file */
#if CONFIG_NFS4
restart:
#endif
		error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
		if (error) {
			error = EBADF;
		}
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
			error = EIO;
		}
#if CONFIG_NFS4
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
			error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
			nofp = NULL;
			if (!error) {
				goto restart;
			}
		}
#endif
		if (error) {
			NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
			goto out;
		}
		if (op == F_UNLCK) {
			error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
		} else if ((op == F_SETLK) || (op == F_SETLKW)) {
			if ((op == F_SETLK) && (flags & F_WAIT)) {
				op = F_SETLKW;
			}
			error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
		} else {
			/* not getlk, unlock or lock? */
			error = EINVAL;
		}
	}

out:
	if (nlop) {
		nfs_lock_owner_rele(nlop);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}

/*
 * Check if an open owner holds any locks on a file.
 */
int
nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
{
	struct nfs_lock_owner *nlop;

	TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
		if (nlop->nlo_open_owner != noop) {
			continue;
		}
		if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
			break;
		}
	}
	return nlop ? 1 : 0;
}

#if CONFIG_NFS4
/*
 * Reopen simple (no deny, no locks) open state that was lost.
 */
int
nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
	nfsnode_t np = nofp->nof_np;
	vnode_t vp = NFSTOV(np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	const char *name = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			break;
		}
		msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
		slpflag = 0;
	}
	if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		lck_mtx_unlock(&nofp->nof_lock);
		return error;
	}
	nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
	lck_mtx_unlock(&nofp->nof_lock);

	nfs_node_lock_force(np);
	if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
		/*
		 * The node's been sillyrenamed, so we need to use
		 * the sillyrename directory/name to do the open.
		 */
		struct nfs_sillyrename *nsp = np->n_sillyrename;
		dvp = NFSTOV(nsp->nsr_dnp);
		if ((error = vnode_get(dvp))) {
			dvp = NULLVP;
			nfs_node_unlock(np);
			goto out;
		}
		name = nsp->nsr_name;
	} else {
		/*
		 * [sigh] We can't trust VFS to get the parent right for named
		 * attribute nodes.  (It likes to reparent the nodes after we've
		 * created them.)  Luckily we can probably get the right parent
		 * from the n_parent we have stashed away.
		 */
		if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
		    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
			dvp = NULL;
		}
		if (!dvp) {
			dvp = vnode_getparent(vp);
		}
		vname = vnode_getname(vp);
		if (!dvp || !vname) {
			if (!error) {
				error = EIO;
			}
			nfs_node_unlock(np);
			goto out;
		}
		name = vname;
	}
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", name);
	if (namelen >= sizeof(smallname)) {
		MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", name);
	}
	nfs_node_unlock(np);
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

restart:
	done = 0;
	if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
		goto out;
	}

	if (nofp->nof_rw) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_w) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
	}
	if (!error && nofp->nof_r) {
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
	}

	if (nfs_mount_state_in_use_end(nmp, error)) {
		if (error == NFSERR_GRACE) {
			goto restart;
		}
		printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
		error = 0;
		goto out;
	}
	done = 1;
out:
	if (error && (error != EINTR) && (error != ERESTART)) {
		nfs_revoke_open_state_for_node(np);
	}
	lck_mtx_lock(&nofp->nof_lock);
	nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
	if (done) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
	} else if (error) {
		printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
		    (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
	}
	lck_mtx_unlock(&nofp->nof_lock);
	if (filename && (filename != &smallname[0])) {
		FREE(filename, M_TEMP);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}

/*
 * Send a normal OPEN RPC to open/create a file.
 */
int
nfs4_open_rpc(
	struct nfs_open_file *nofp,
	vfs_context_t ctx,
	struct componentname *cnp,
	struct vnode_attr *vap,
	vnode_t dvp,
	vnode_t *vpp,
	int create,
	int share_access,
	int share_deny)
{
	return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
	           cnp, vap, dvp, vpp, create, share_access, share_deny);
}

/*
 * Send an OPEN RPC to reopen a file.
 */
int
nfs4_open_reopen_rpc(
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	struct componentname *cnp,
	vnode_t dvp,
	vnode_t *vpp,
	int share_access,
	int share_deny)
{
	return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
}

/*
 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
 */
int
nfs4_open_confirm_rpc(
	struct nfsmount *nmp,
	nfsnode_t dnp,
	u_char *fhp,
	int fhlen,
	struct nfs_open_owner *noop,
	nfs_stateid *sid,
	thread_t thd,
	kauth_cred_t cred,
	struct nfs_vattr *nvap,
	uint64_t *xidp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status, numops;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_CONFIRM, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

/*
 * common OPEN RPC code
 *
 * If create is set, ctx must be passed in.
 * Returns a node on success if no node passed in.
 */
int
nfs4_open_rpc_internal(
	struct nfs_open_file *nofp,
	vfs_context_t ctx,
	thread_t thd,
	kauth_cred_t cred,
	struct componentname *cnp,
	struct vnode_attr *vap,
	vnode_t dvp,
	vnode_t *vpp,
	int create,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
	u_int64_t xid, savedxid = 0;
	nfsnode_t dnp = VTONFS(dvp);
	nfsnode_t np, newnp = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags, delegation, recall;
	struct nfs_stateid stateid, dstateid, *sid;
	fhandle_t fh;
	struct nfsreq rq, *req = &rq;
	struct nfs_dulookup dul;
	char sbuf[64], *s;
	uint32_t ace_type, ace_flags, ace_mask, len, slen;
	struct kauth_ace ace;
	struct nfsreq_secinfo_args si;

	if (create && !ctx) {
		return EINVAL;
	}

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	np = *vpp ? VTONFS(*vpp) : NULL;
	if (create && vap) {
		exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
		nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
		gotuid = VATTR_IS_ACTIVE(vap, va_uid);
		gotgid = VATTR_IS_ACTIVE(vap, va_gid);
		if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
			vap->va_vaflags |= VA_UTIMES_NULL;
		}
	} else {
		exclusive = gotuid = gotgid = 0;
	}
	if (nofp) {
		sid = &nofp->nof_stateid;
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
		sid = &stateid;
	}

	if ((error = nfs_open_owner_set_busy(noop, thd))) {
		return error;
	}
again:
	rflags = delegation = recall = 0;
	ace.ace_flags = 0;
	s = sbuf;
	slen = sizeof(sbuf);
	NVATTR_INIT(&nvattr);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
	nfsm_chain_add_32(error, &nmreq, create);
	if (create) {
		if (exclusive) {
			static uint32_t create_verf; // XXX need a better verifier
			create_verf++;
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
			/* insert 64 bit verifier */
			nfsm_chain_add_32(error, &nmreq, create_verf);
			nfsm_chain_add_32(error, &nmreq, create_verf);
		} else {
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
			nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
		}
	}
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	if (!error) {
		error = busyerror = nfs_node_set_busy(dnp, thd);
	}
	nfsmout_if(error);

	if (create && !namedattrs) {
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
	}

	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
	if (!error) {
		if (create && !namedattrs) {
			nfs_dulookup_start(&dul, dnp, ctx);
		}
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
		savedxid = xid;
	}

	if (create && !namedattrs) {
		nfs_dulookup_finish(&dul, dnp, ctx);
	}

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
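	/*
	 * The OPEN reply may include a read or write delegation along with
	 * the open stateid.  A delegation carries its own stateid, a recall
	 * flag, and (for write delegations) space limits, plus an nfsace4
	 * describing who may access the file locally; parse all of that
	 * here, and decide after the RPC completes whether to keep the
	 * delegation or return it.
	 */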
	if (!error) {
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			}
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			if (!error && (len >= slen)) {
				MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
				if (s) {
					slen = len + 1;
				} else {
					ace.ace_flags = 0;
				}
			}
			if (s) {
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			} else {
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			}
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
					ace.ace_flags = 0;
				}
			}
			if (error || !s) {
				ace.ace_flags = 0;
			}
			if (s && (s != sbuf)) {
				FREE(s, M_TEMP);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	}
	/* At this point if we have no error, the object was created/opened. */
	open_error = error;
	nfsmout_if(error);
	if (create && vap && !exclusive) {
		nfs_vattr_set_supported(bitmap, vap);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
		error = EBADRPC;
		goto nfsmout;
	}
	if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX for the open case, what if fh doesn't match the vnode we think we're opening?
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			NP(np, "nfs4_open_rpc: warning: file handle mismatch");
		}
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(dnp);
	}
	nfsmout_if(error);
5329
5330 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5331 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5332 }
5333
5334 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5335 nfs_node_unlock(dnp);
5336 lockerror = ENOENT;
5337 NVATTR_CLEANUP(&nvattr);
5338 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
5339 nfsmout_if(error);
5340 savedxid = xid;
5341 if ((lockerror = nfs_node_lock(dnp))) {
5342 error = lockerror;
5343 }
5344 }
5345
5346nfsmout:
5347 nfsm_chain_cleanup(&nmreq);
5348 nfsm_chain_cleanup(&nmrep);
5349
5350 if (!lockerror && create) {
5351 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5352 dnp->n_flag &= ~NNEGNCENTRIES;
5353 cache_purge_negatives(dvp);
5354 }
5355 dnp->n_flag |= NMODIFIED;
5356 nfs_node_unlock(dnp);
5357 lockerror = ENOENT;
5358 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
5359 }
5360 if (!lockerror) {
5361 nfs_node_unlock(dnp);
5362 }
5363 if (!error && !np && fh.fh_len) {
5364 /* create the vnode with the filehandle and attributes */
5365 xid = savedxid;
5366 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
5367 if (!error) {
5368 newvp = NFSTOV(newnp);
5369 }
5370 }
5371 NVATTR_CLEANUP(&nvattr);
5372 if (!busyerror) {
5373 nfs_node_clear_busy(dnp);
5374 }
5375 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5376 if (!np) {
5377 np = newnp;
5378 }
5379 if (!error && np && !recall) {
5380 /* stuff the delegation state in the node */
5381 lck_mtx_lock(&np->n_openlock);
5382 np->n_openflags &= ~N_DELEG_MASK;
5383 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5384 np->n_dstateid = dstateid;
5385 np->n_dace = ace;
5386 if (np->n_dlink.tqe_next == NFSNOLIST) {
5387 lck_mtx_lock(&nmp->nm_lock);
5388 if (np->n_dlink.tqe_next == NFSNOLIST) {
5389 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5390 }
5391 lck_mtx_unlock(&nmp->nm_lock);
5392 }
5393 lck_mtx_unlock(&np->n_openlock);
5394 } else {
5395 /* give the delegation back */
5396 if (np) {
5397 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5398 /* update delegation state and return it */
5399 lck_mtx_lock(&np->n_openlock);
5400 np->n_openflags &= ~N_DELEG_MASK;
5401 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5402 np->n_dstateid = dstateid;
5403 np->n_dace = ace;
5404 if (np->n_dlink.tqe_next == NFSNOLIST) {
5405 lck_mtx_lock(&nmp->nm_lock);
5406 if (np->n_dlink.tqe_next == NFSNOLIST) {
5407 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5408 }
5409 lck_mtx_unlock(&nmp->nm_lock);
5410 }
5411 lck_mtx_unlock(&np->n_openlock);
5412 /* don't need to send a separate delegreturn for fh */
5413 fh.fh_len = 0;
5414 }
5415 /* return np's current delegation */
5416 nfs4_delegation_return(np, 0, thd, cred);
5417 }
5418 if (fh.fh_len) { /* return fh's delegation if it wasn't for np */
5419 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
5420 }
5421 }
5422 }
5423 if (error) {
5424 if (exclusive && (error == NFSERR_NOTSUPP)) {
5425 exclusive = 0;
5426 goto again;
5427 }
5428 if (newvp) {
5429 nfs_node_unlock(newnp);
5430 vnode_put(newvp);
5431 }
5432 } else if (create) {
5433 nfs_node_unlock(newnp);
5434 if (exclusive) {
5435 error = nfs4_setattr_rpc(newnp, vap, ctx);
5436 if (error && (gotuid || gotgid)) {
5437 /* it's possible the server didn't like our attempt to set the uid/gid, */
5438 /* so let's try it again without them */
5439 VATTR_CLEAR_ACTIVE(vap, va_uid);
5440 VATTR_CLEAR_ACTIVE(vap, va_gid);
5441 error = nfs4_setattr_rpc(newnp, vap, ctx);
5442 }
5443 }
5444 if (error) {
5445 vnode_put(newvp);
5446 } else {
5447 *vpp = newvp;
5448 }
5449 }
5450 nfs_open_owner_clear_busy(noop);
5451 return error;
5452}
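/*
 * Note on the delegation results parsed above: for a READ or WRITE
 * delegation the server returns, roughly (a sketch of the open_delegation4
 * layout from the NFSv4 spec, not verbatim XDR):
 *
 *	stateid		the delegation stateid
 *	recall		boolean: server wants the delegation back right away
 *	space_limit	WRITE only: limitby + 2 words (skipped above, unused)
 *	permissions	nfsace4: type, flag, access mask, opaque "who" string
 *
 * The "who" string is mapped to a GUID via nfs4_id2guid(); if any part of
 * the ACE can't be accepted we just zero ace_flags and carry on.
 */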
5453
5454
5455/*
5456 * Send an OPEN RPC to claim a delegated open for a file
5457 */
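/*
 * This uses CLAIM_DELEGATE_CUR: we already hold a delegation for the file
 * and are establishing an open against it, so the open_claim4 carries the
 * delegation stateid plus the component name (relative to the directory
 * filehandle in PUTFH) instead of just a name. That's why this call has to
 * dig up a usable parent directory and name, even for sillyrenamed or
 * named-attribute nodes.
 */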
5458int
5459nfs4_claim_delegated_open_rpc(
5460 struct nfs_open_file *nofp,
5461 int share_access,
5462 int share_deny,
5463 int flags)
5464{
5465 struct nfsmount *nmp;
5466 struct nfs_open_owner *noop = nofp->nof_owner;
5467 struct nfs_vattr nvattr;
5468 int error = 0, lockerror = ENOENT, status;
5469 int nfsvers, numops;
5470 u_int64_t xid;
5471 nfsnode_t np = nofp->nof_np;
5472 struct nfsm_chain nmreq, nmrep;
5473 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5474 uint32_t rflags = 0, delegation, recall = 0;
5475 fhandle_t fh;
5476 struct nfs_stateid dstateid;
5477 char sbuf[64], *s = sbuf;
5478 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5479 struct kauth_ace ace;
5480 vnode_t dvp = NULL;
5481 const char *vname = NULL;
5482 const char *name = NULL;
5483 size_t namelen;
5484 char smallname[128];
5485 char *filename = NULL;
5486 struct nfsreq_secinfo_args si;
5487
5488 nmp = NFSTONMP(np);
5489 if (nfs_mount_gone(nmp)) {
5490 return ENXIO;
5491 }
5492 nfsvers = nmp->nm_vers;
5493
5494 nfs_node_lock_force(np);
5495 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5496 /*
5497 * The node's been sillyrenamed, so we need to use
5498 * the sillyrename directory/name to do the open.
5499 */
5500 struct nfs_sillyrename *nsp = np->n_sillyrename;
5501 dvp = NFSTOV(nsp->nsr_dnp);
5502 if ((error = vnode_get(dvp))) {
5503 dvp = NULLVP;
5504 nfs_node_unlock(np);
5505 goto out;
5506 }
5507 name = nsp->nsr_name;
5508 } else {
5509 /*
5510 * [sigh] We can't trust VFS to get the parent right for named
5511 * attribute nodes. (It likes to reparent the nodes after we've
5512 * created them.) Luckily we can probably get the right parent
5513 * from the n_parent we have stashed away.
5514 */
5515 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5516 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5517 dvp = NULL;
5518 }
5519 if (!dvp) {
5520 dvp = vnode_getparent(NFSTOV(np));
5521 }
5522 vname = vnode_getname(NFSTOV(np));
5523 if (!dvp || !vname) {
5524 if (!error) {
5525 error = EIO;
5526 }
5527 nfs_node_unlock(np);
5528 goto out;
5529 }
5530 name = vname;
5531 }
5532 filename = &smallname[0];
5533 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5534 if (namelen >= sizeof(smallname)) {
5535 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
5536 if (!filename) {
5537 error = ENOMEM;
5538 nfs_node_unlock(np);
5539 goto out;
5540 }
5541 snprintf(filename, namelen + 1, "%s", name);
5542 }
5543 nfs_node_unlock(np);
5544
5545 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5546 goto out;
5547 }
5548 NVATTR_INIT(&nvattr);
5549 delegation = NFS_OPEN_DELEGATE_NONE;
5550 dstateid = np->n_dstateid;
5551 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5552
5553 nfsm_chain_null(&nmreq);
5554 nfsm_chain_null(&nmrep);
5555
5556 // PUTFH, OPEN, GETATTR(FH)
5557 numops = 3;
5558 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5559 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5560 numops--;
5561 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5562 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5563 numops--;
5564 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5565 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5566 nfsm_chain_add_32(error, &nmreq, share_access);
5567 nfsm_chain_add_32(error, &nmreq, share_deny);
5568 // open owner: clientid + uid
5569 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5570 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5571 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5572 // openflag4
5573 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5574 // open_claim4
5575 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5576 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5577 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5578 numops--;
5579 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5580 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5581 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5582 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5583 nfsm_chain_build_done(error, &nmreq);
5584 nfsm_assert(error, (numops == 0), EPROTO);
5585 nfsmout_if(error);
5586
5587 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5588 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
5589
5590 if ((lockerror = nfs_node_lock(np))) {
5591 error = lockerror;
5592 }
5593 nfsm_chain_skip_tag(error, &nmrep);
5594 nfsm_chain_get_32(error, &nmrep, numops);
5595 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5596 nfsmout_if(error);
5597 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5598 nfs_owner_seqid_increment(noop, NULL, error);
5599 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5600 nfsm_chain_check_change_info(error, &nmrep, np);
5601 nfsm_chain_get_32(error, &nmrep, rflags);
5602 bmlen = NFS_ATTR_BITMAP_LEN;
5603 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5604 nfsm_chain_get_32(error, &nmrep, delegation);
5605 if (!error) {
5606 switch (delegation) {
5607 case NFS_OPEN_DELEGATE_NONE:
5608 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5609 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5610 break;
5611 case NFS_OPEN_DELEGATE_READ:
5612 case NFS_OPEN_DELEGATE_WRITE:
5613 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5614 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5615 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5616 (delegation == NFS_OPEN_DELEGATE_READ))) {
5617 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5618 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5619 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5620 }
5621 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5622 nfsm_chain_get_32(error, &nmrep, recall);
5623 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the space_limit: limitby + 2 words XXX
5624 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5625 }
5626 /* if we have any trouble accepting the ACE, just invalidate it */
5627 ace_type = ace_flags = ace_mask = len = 0;
5628 nfsm_chain_get_32(error, &nmrep, ace_type);
5629 nfsm_chain_get_32(error, &nmrep, ace_flags);
5630 nfsm_chain_get_32(error, &nmrep, ace_mask);
5631 nfsm_chain_get_32(error, &nmrep, len);
5632 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5633 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5634 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5635 if (!error && (len >= slen)) {
5636 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5637 if (s) {
5638 slen = len + 1;
5639 } else {
5640 ace.ace_flags = 0;
5641 }
5642 }
5643 if (s) {
5644 nfsm_chain_get_opaque(error, &nmrep, len, s);
5645 } else {
5646 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5647 }
5648 if (!error && s) {
5649 s[len] = '\0';
5650 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5651 ace.ace_flags = 0;
5652 }
5653 }
5654 if (error || !s) {
5655 ace.ace_flags = 0;
5656 }
5657 if (s && (s != sbuf)) {
5658 FREE(s, M_TEMP);
5659 }
5660 if (!error) {
5661 /* stuff the latest delegation state in the node */
5662 lck_mtx_lock(&np->n_openlock);
5663 np->n_openflags &= ~N_DELEG_MASK;
5664 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5665 np->n_dstateid = dstateid;
5666 np->n_dace = ace;
5667 if (np->n_dlink.tqe_next == NFSNOLIST) {
5668 lck_mtx_lock(&nmp->nm_lock);
5669 if (np->n_dlink.tqe_next == NFSNOLIST) {
5670 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5671 }
5672 lck_mtx_unlock(&nmp->nm_lock);
5673 }
5674 lck_mtx_unlock(&np->n_openlock);
5675 }
5676 break;
5677 default:
5678 error = EBADRPC;
5679 break;
5680 }
5681 }
5682 nfsmout_if(error);
5683 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5684 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5685 nfsmout_if(error);
5686 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5687 printf("nfs: open delegated claim didn't return filehandle? %s\n", filename ? filename : "???");
5688 error = EBADRPC;
5689 goto nfsmout;
5690 }
5691 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5692 // XXX what if fh doesn't match the vnode we think we're re-opening?
5693 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5694 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5695 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5696 }
5697 }
5698 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5699 nfsmout_if(error);
5700 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5701 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5702 }
5703nfsmout:
5704 NVATTR_CLEANUP(&nvattr);
5705 nfsm_chain_cleanup(&nmreq);
5706 nfsm_chain_cleanup(&nmrep);
5707 if (!lockerror) {
5708 nfs_node_unlock(np);
5709 }
5710 nfs_open_owner_clear_busy(noop);
5711 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5712 if (recall) {
5713 /*
5714 * We're making a delegated claim.
5715 * Don't return the delegation here in case we have more to claim.
5716 * Just make sure it's queued up to be returned.
5717 */
5718 nfs4_delegation_return_enqueue(np);
5719 }
5720 }
5721out:
5722 // if (!error)
5723 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5724 if (filename && (filename != &smallname[0])) {
5725 FREE(filename, M_TEMP);
5726 }
5727 if (vname) {
5728 vnode_putname(vname);
5729 }
5730 if (dvp != NULLVP) {
5731 vnode_put(dvp);
5732 }
5733 return error;
5734}
5735
5736/*
5737 * Send an OPEN RPC to reclaim an open file.
5738 */
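/*
 * This uses CLAIM_PREVIOUS, the claim type for reclaiming state during a
 * server's grace period after it restarts. The open_claim4 carries only
 * the delegation type we previously held (READ/WRITE/NONE); the file
 * itself is identified by the PUTFH of the node's own filehandle rather
 * than a directory/name pair.
 */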
5739int
5740nfs4_open_reclaim_rpc(
5741 struct nfs_open_file *nofp,
5742 int share_access,
5743 int share_deny)
5744{
5745 struct nfsmount *nmp;
5746 struct nfs_open_owner *noop = nofp->nof_owner;
5747 struct nfs_vattr nvattr;
5748 int error = 0, lockerror = ENOENT, status;
5749 int nfsvers, numops;
5750 u_int64_t xid;
5751 nfsnode_t np = nofp->nof_np;
5752 struct nfsm_chain nmreq, nmrep;
5753 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5754 uint32_t rflags = 0, delegation, recall = 0;
5755 fhandle_t fh;
5756 struct nfs_stateid dstateid;
5757 char sbuf[64], *s = sbuf;
5758 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5759 struct kauth_ace ace;
5760 struct nfsreq_secinfo_args si;
5761
5762 nmp = NFSTONMP(np);
5763 if (nfs_mount_gone(nmp)) {
5764 return ENXIO;
5765 }
5766 nfsvers = nmp->nm_vers;
5767
5768 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5769 return error;
5770 }
5771
5772 NVATTR_INIT(&nvattr);
5773 delegation = NFS_OPEN_DELEGATE_NONE;
5774 dstateid = np->n_dstateid;
5775 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5776
5777 nfsm_chain_null(&nmreq);
5778 nfsm_chain_null(&nmrep);
5779
5780 // PUTFH, OPEN, GETATTR(FH)
5781 numops = 3;
5782 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5783 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5784 numops--;
5785 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5786 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5787 numops--;
5788 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5789 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5790 nfsm_chain_add_32(error, &nmreq, share_access);
5791 nfsm_chain_add_32(error, &nmreq, share_deny);
5792 // open owner: clientid + uid
5793 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5794 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5795 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5796 // openflag4
5797 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5798 // open_claim4
5799 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5800 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5801 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5802 NFS_OPEN_DELEGATE_NONE;
5803 nfsm_chain_add_32(error, &nmreq, delegation);
5804 delegation = NFS_OPEN_DELEGATE_NONE;
5805 numops--;
5806 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5807 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5808 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5809 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5810 nfsm_chain_build_done(error, &nmreq);
5811 nfsm_assert(error, (numops == 0), EPROTO);
5812 nfsmout_if(error);
5813
5814 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5815 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5816
5817 if ((lockerror = nfs_node_lock(np))) {
5818 error = lockerror;
5819 }
5820 nfsm_chain_skip_tag(error, &nmrep);
5821 nfsm_chain_get_32(error, &nmrep, numops);
5822 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5823 nfsmout_if(error);
5824 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5825 nfs_owner_seqid_increment(noop, NULL, error);
5826 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5827 nfsm_chain_check_change_info(error, &nmrep, np);
5828 nfsm_chain_get_32(error, &nmrep, rflags);
5829 bmlen = NFS_ATTR_BITMAP_LEN;
5830 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5831 nfsm_chain_get_32(error, &nmrep, delegation);
5832 if (!error) {
5833 switch (delegation) {
5834 case NFS_OPEN_DELEGATE_NONE:
5835 if (np->n_openflags & N_DELEG_MASK) {
5836 /*
5837 * Hey! We were supposed to get our delegation back even
5838 * if it was getting immediately recalled. Bad server!
5839 *
5840 * Just try to return the existing delegation.
5841 */
5842 // NP(np, "nfs: open reclaim didn't return delegation?");
5843 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5844 recall = 1;
5845 }
5846 break;
5847 case NFS_OPEN_DELEGATE_READ:
5848 case NFS_OPEN_DELEGATE_WRITE:
5849 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5850 nfsm_chain_get_32(error, &nmrep, recall);
5851 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the space_limit: limitby + 2 words XXX
5852 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5853 }
5854 /* if we have any trouble accepting the ACE, just invalidate it */
5855 ace_type = ace_flags = ace_mask = len = 0;
5856 nfsm_chain_get_32(error, &nmrep, ace_type);
5857 nfsm_chain_get_32(error, &nmrep, ace_flags);
5858 nfsm_chain_get_32(error, &nmrep, ace_mask);
5859 nfsm_chain_get_32(error, &nmrep, len);
5860 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5861 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5862 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5863 if (!error && (len >= slen)) {
5864 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5865 if (s) {
5866 slen = len + 1;
5867 } else {
5868 ace.ace_flags = 0;
5869 }
5870 }
5871 if (s) {
5872 nfsm_chain_get_opaque(error, &nmrep, len, s);
5873 } else {
5874 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5875 }
5876 if (!error && s) {
5877 s[len] = '\0';
5878 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5879 ace.ace_flags = 0;
5880 }
5881 }
5882 if (error || !s) {
5883 ace.ace_flags = 0;
5884 }
5885 if (s && (s != sbuf)) {
5886 FREE(s, M_TEMP);
5887 }
5888 if (!error) {
5889 /* stuff the delegation state in the node */
5890 lck_mtx_lock(&np->n_openlock);
5891 np->n_openflags &= ~N_DELEG_MASK;
5892 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5893 np->n_dstateid = dstateid;
5894 np->n_dace = ace;
5895 if (np->n_dlink.tqe_next == NFSNOLIST) {
5896 lck_mtx_lock(&nmp->nm_lock);
5897 if (np->n_dlink.tqe_next == NFSNOLIST) {
5898 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5899 }
5900 lck_mtx_unlock(&nmp->nm_lock);
5901 }
5902 lck_mtx_unlock(&np->n_openlock);
5903 }
5904 break;
5905 default:
5906 error = EBADRPC;
5907 break;
5908 }
5909 }
5910 nfsmout_if(error);
5911 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5912 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5913 nfsmout_if(error);
5914 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5915 NP(np, "nfs: open reclaim didn't return filehandle?");
5916 error = EBADRPC;
5917 goto nfsmout;
5918 }
5919 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5920 // XXX what if fh doesn't match the vnode we think we're re-opening?
5921 // That should be pretty hard in this case, given that we are doing
5922 // the open reclaim using the file handle (and not a dir/name pair).
5923 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5924 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5925 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5926 }
5927 }
5928 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5929 nfsmout_if(error);
5930 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5931 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5932 }
5933nfsmout:
5934 // if (!error)
5935 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5936 NVATTR_CLEANUP(&nvattr);
5937 nfsm_chain_cleanup(&nmreq);
5938 nfsm_chain_cleanup(&nmrep);
5939 if (!lockerror) {
5940 nfs_node_unlock(np);
5941 }
5942 nfs_open_owner_clear_busy(noop);
5943 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5944 if (recall) {
5945 nfs4_delegation_return_enqueue(np);
5946 }
5947 }
5948 return error;
5949}
5950
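/*
 * Send an OPEN_DOWNGRADE RPC to reduce the share modes of an open.
 * The access/deny values sent are the open file's current nof_access and
 * nof_deny, which callers are expected to have already recomputed to the
 * reduced modes (e.g. dropping write access once the last writer is gone);
 * the server returns a replacement open stateid.
 */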
5951int
5952nfs4_open_downgrade_rpc(
5953 nfsnode_t np,
5954 struct nfs_open_file *nofp,
5955 vfs_context_t ctx)
5956{
5957 struct nfs_open_owner *noop = nofp->nof_owner;
5958 struct nfsmount *nmp;
5959 int error, lockerror = ENOENT, status, nfsvers, numops;
5960 struct nfsm_chain nmreq, nmrep;
5961 u_int64_t xid;
5962 struct nfsreq_secinfo_args si;
5963
5964 nmp = NFSTONMP(np);
5965 if (nfs_mount_gone(nmp)) {
5966 return ENXIO;
5967 }
5968 nfsvers = nmp->nm_vers;
5969
5970 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5971 return error;
5972 }
5973
5974 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5975 nfsm_chain_null(&nmreq);
5976 nfsm_chain_null(&nmrep);
5977
5978 // PUTFH, OPEN_DOWNGRADE, GETATTR
5979 numops = 3;
5980 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5981 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5982 numops--;
5983 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5984 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5985 numops--;
5986 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5987 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5988 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5989 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5990 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5991 numops--;
5992 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5993 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5994 nfsm_chain_build_done(error, &nmreq);
5995 nfsm_assert(error, (numops == 0), EPROTO);
5996 nfsmout_if(error);
5997 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5998 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5999 &si, R_NOINTR, &nmrep, &xid, &status);
6000
6001 if ((lockerror = nfs_node_lock(np))) {
6002 error = lockerror;
6003 }
6004 nfsm_chain_skip_tag(error, &nmrep);
6005 nfsm_chain_get_32(error, &nmrep, numops);
6006 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6007 nfsmout_if(error);
6008 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
6009 nfs_owner_seqid_increment(noop, NULL, error);
6010 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6011 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6012 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6013nfsmout:
6014 if (!lockerror) {
6015 nfs_node_unlock(np);
6016 }
6017 nfs_open_owner_clear_busy(noop);
6018 nfsm_chain_cleanup(&nmreq);
6019 nfsm_chain_cleanup(&nmrep);
6020 return error;
6021}
6022
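/*
 * Send a CLOSE RPC to release the open file's state on the server.
 * CLOSE is sequenced by the open owner's seqid and returns a new stateid
 * for the now-closed open.
 */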
6023int
6024nfs4_close_rpc(
6025 nfsnode_t np,
6026 struct nfs_open_file *nofp,
6027 thread_t thd,
6028 kauth_cred_t cred,
6029 int flags)
6030{
6031 struct nfs_open_owner *noop = nofp->nof_owner;
6032 struct nfsmount *nmp;
6033 int error, lockerror = ENOENT, status, nfsvers, numops;
6034 struct nfsm_chain nmreq, nmrep;
6035 u_int64_t xid;
6036 struct nfsreq_secinfo_args si;
6037
6038 nmp = NFSTONMP(np);
6039 if (nfs_mount_gone(nmp)) {
6040 return ENXIO;
6041 }
6042 nfsvers = nmp->nm_vers;
6043
6044 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6045 return error;
6046 }
6047
6048 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6049 nfsm_chain_null(&nmreq);
6050 nfsm_chain_null(&nmrep);
6051
6052 // PUTFH, CLOSE, GETATTR
6053 numops = 3;
6054 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6055 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
6056 numops--;
6057 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6058 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6059 numops--;
6060 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
6061 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6062 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6063 numops--;
6064 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6065 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6066 nfsm_chain_build_done(error, &nmreq);
6067 nfsm_assert(error, (numops == 0), EPROTO);
6068 nfsmout_if(error);
6069 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6070
6071 if ((lockerror = nfs_node_lock(np))) {
6072 error = lockerror;
6073 }
6074 nfsm_chain_skip_tag(error, &nmrep);
6075 nfsm_chain_get_32(error, &nmrep, numops);
6076 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6077 nfsmout_if(error);
6078 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
6079 nfs_owner_seqid_increment(noop, NULL, error);
6080 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6081 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6082 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6083nfsmout:
6084 if (!lockerror) {
6085 nfs_node_unlock(np);
6086 }
6087 nfs_open_owner_clear_busy(noop);
6088 nfsm_chain_cleanup(&nmreq);
6089 nfsm_chain_cleanup(&nmrep);
6090 return error;
6091}
6092
6093
6094/*
6095 * Claim the delegated open combinations this open file holds.
6096 */
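/*
 * An open file keeps a counter for each (access, deny) combination, with a
 * parallel "nof_d_*" counter for opens held purely under a delegation.
 * Each claim below folds a delegated count back into its regular counter,
 * e.g. (a sketch of the naming scheme):
 *
 *	nof_d_rw_drw -> OPEN(ACCESS_BOTH, DENY_BOTH)   -> nof_rw_drw
 *	nof_d_w_dw   -> OPEN(ACCESS_WRITE, DENY_WRITE) -> nof_w_dw
 *	nof_d_r      -> OPEN(ACCESS_READ, DENY_NONE)   -> nof_r
 *
 * Deny-mode opens must actually be claimed; the plain deny-none opens can
 * instead be flagged for reopen on next access if the delegation turns out
 * to be lost and no locks are held.
 */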
6097int
6098nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
6099{
6100 struct nfs_open_owner *noop = nofp->nof_owner;
6101 struct nfs_lock_owner *nlop;
6102 struct nfs_file_lock *nflp, *nextnflp;
6103 struct nfsmount *nmp;
6104 int error = 0, reopen = 0;
6105
6106 if (nofp->nof_d_rw_drw) {
6107 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6108 if (!error) {
6109 lck_mtx_lock(&nofp->nof_lock);
6110 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6111 nofp->nof_d_rw_drw = 0;
6112 lck_mtx_unlock(&nofp->nof_lock);
6113 }
6114 }
6115 if (!error && nofp->nof_d_w_drw) {
6116 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6117 if (!error) {
6118 lck_mtx_lock(&nofp->nof_lock);
6119 nofp->nof_w_drw += nofp->nof_d_w_drw;
6120 nofp->nof_d_w_drw = 0;
6121 lck_mtx_unlock(&nofp->nof_lock);
6122 }
6123 }
6124 if (!error && nofp->nof_d_r_drw) {
6125 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6126 if (!error) {
6127 lck_mtx_lock(&nofp->nof_lock);
6128 nofp->nof_r_drw += nofp->nof_d_r_drw;
6129 nofp->nof_d_r_drw = 0;
6130 lck_mtx_unlock(&nofp->nof_lock);
6131 }
6132 }
6133 if (!error && nofp->nof_d_rw_dw) {
6134 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6135 if (!error) {
6136 lck_mtx_lock(&nofp->nof_lock);
6137 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6138 nofp->nof_d_rw_dw = 0;
6139 lck_mtx_unlock(&nofp->nof_lock);
6140 }
6141 }
6142 if (!error && nofp->nof_d_w_dw) {
6143 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6144 if (!error) {
6145 lck_mtx_lock(&nofp->nof_lock);
6146 nofp->nof_w_dw += nofp->nof_d_w_dw;
6147 nofp->nof_d_w_dw = 0;
6148 lck_mtx_unlock(&nofp->nof_lock);
6149 }
6150 }
6151 if (!error && nofp->nof_d_r_dw) {
6152 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6153 if (!error) {
6154 lck_mtx_lock(&nofp->nof_lock);
6155 nofp->nof_r_dw += nofp->nof_d_r_dw;
6156 nofp->nof_d_r_dw = 0;
6157 lck_mtx_unlock(&nofp->nof_lock);
6158 }
6159 }
6160 /* non-deny-mode opens may be reopened if no locks are held */
6161 if (!error && nofp->nof_d_rw) {
6162 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6163 /* for some errors, we should just try reopening the file */
6164 if (nfs_mount_state_error_delegation_lost(error)) {
6165 reopen = error;
6166 }
6167 if (!error || reopen) {
6168 lck_mtx_lock(&nofp->nof_lock);
6169 nofp->nof_rw += nofp->nof_d_rw;
6170 nofp->nof_d_rw = 0;
6171 lck_mtx_unlock(&nofp->nof_lock);
6172 }
6173 }
6174 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
6175 if ((!error || reopen) && nofp->nof_d_w) {
6176 if (!error) {
6177 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6178 /* for some errors, we should just try reopening the file */
6179 if (nfs_mount_state_error_delegation_lost(error)) {
6180 reopen = error;
6181 }
6182 }
6183 if (!error || reopen) {
6184 lck_mtx_lock(&nofp->nof_lock);
6185 nofp->nof_w += nofp->nof_d_w;
6186 nofp->nof_d_w = 0;
6187 lck_mtx_unlock(&nofp->nof_lock);
6188 }
6189 }
6190 if ((!error || reopen) && nofp->nof_d_r) {
6191 if (!error) {
6192 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6193 /* for some errors, we should just try reopening the file */
6194 if (nfs_mount_state_error_delegation_lost(error)) {
6195 reopen = error;
6196 }
6197 }
6198 if (!error || reopen) {
6199 lck_mtx_lock(&nofp->nof_lock);
6200 nofp->nof_r += nofp->nof_d_r;
6201 nofp->nof_d_r = 0;
6202 lck_mtx_unlock(&nofp->nof_lock);
6203 }
6204 }
6205
6206 if (reopen) {
6207 /*
6208 * Any problems with the delegation probably indicate that we
6209 * should review/return all of our current delegation state.
6210 */
6211 if ((nmp = NFSTONMP(nofp->nof_np))) {
6212 nfs4_delegation_return_enqueue(nofp->nof_np);
6213 lck_mtx_lock(&nmp->nm_lock);
6214 nfs_need_recover(nmp, NFSERR_EXPIRED);
6215 lck_mtx_unlock(&nmp->nm_lock);
6216 }
6217 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6218 /* just reopen the file on next access */
6219 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6220 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6221 lck_mtx_lock(&nofp->nof_lock);
6222 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6223 lck_mtx_unlock(&nofp->nof_lock);
6224 return 0;
6225 }
6226 if (reopen) {
6227 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6228 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6229 }
6230 }
6231
6232 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6233 /* claim delegated locks */
6234 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
6235 if (nlop->nlo_open_owner != noop) {
6236 continue;
6237 }
6238 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6239 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6240 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6241 continue;
6242 }
6243 /* skip non-delegated locks */
6244 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6245 continue;
6246 }
6247 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6248 if (error) {
6249 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6250 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6251 break;
6252 }
6253 // else {
6254 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6255 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6256 // }
6257 }
6258 if (error) {
6259 break;
6260 }
6261 }
6262 }
6263
6264 if (!error) { /* all state claimed successfully! */
6265 return 0;
6266 }
6267
6268 /* restart if it looks like a problem more than just losing the delegation */
6269 if (!nfs_mount_state_error_delegation_lost(error) &&
6270 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6271 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6272 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6273 nfs_need_reconnect(nmp);
6274 }
6275 return error;
6276 }
6277
6278 /* delegated state lost (once held but now not claimable) */
6279 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6280
6281 /*
6282 * Any problems with the delegation probably indicate that we
6283 * should review/return all of our current delegation state.
6284 */
6285 if ((nmp = NFSTONMP(nofp->nof_np))) {
6286 nfs4_delegation_return_enqueue(nofp->nof_np);
6287 lck_mtx_lock(&nmp->nm_lock);
6288 nfs_need_recover(nmp, NFSERR_EXPIRED);
6289 lck_mtx_unlock(&nmp->nm_lock);
6290 }
6291
6292 /* revoke all open file state */
6293 nfs_revoke_open_state_for_node(nofp->nof_np);
6294
6295 return error;
6296}
6297#endif /* CONFIG_NFS4 */
6298
6299/*
6300 * Release all open state for the given node.
6301 */
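/*
 * If "force" is set, only the local state is torn down. Otherwise we also
 * try to tell the server: an unlock RPC for each non-delegated lock and
 * (for v4) a CLOSE RPC for each open, all issued with R_RECOVER.
 */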
6302void
6303nfs_release_open_state_for_node(nfsnode_t np, int force)
6304{
6305 struct nfsmount *nmp = NFSTONMP(np);
6306 struct nfs_open_file *nofp;
6307 struct nfs_file_lock *nflp, *nextnflp;
6308
6309 /* drop held locks */
6310 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6311 /* skip dead & blocked lock requests */
6312 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6313 continue;
6314 }
6315 /* send an unlock if not a delegated lock */
6316 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6317 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
6318 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6319 }
6320 /* kill/remove the lock */
6321 lck_mtx_lock(&np->n_openlock);
6322 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6323 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6324 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6325 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6326 if (nflp->nfl_blockcnt) {
6327 /* wake up anyone blocked on this lock */
6328 wakeup(nflp);
6329 } else {
6330 /* remove nflp from lock list and destroy */
6331 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6332 nfs_file_lock_destroy(nflp);
6333 }
6334 lck_mtx_unlock(&np->n_openlock);
6335 }
6336
6337 lck_mtx_lock(&np->n_openlock);
6338
6339 /* drop all opens */
6340 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6341 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6342 continue;
6343 }
6344 /* mark open state as lost */
6345 lck_mtx_lock(&nofp->nof_lock);
6346 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6347 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
6348
6349 lck_mtx_unlock(&nofp->nof_lock);
6350#if CONFIG_NFS4
6351 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6352 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
6353 }
6354#endif
6355 }
6356
6357 lck_mtx_unlock(&np->n_openlock);
6358}
6359
6360/*
6361 * State for a node has been lost, drop it, and revoke the node.
6362 * Attempt to return any state if possible in case the server
6363 * might somehow think we hold it.
6364 */
6365void
6366nfs_revoke_open_state_for_node(nfsnode_t np)
6367{
6368 struct nfsmount *nmp;
6369
6370 /* mark node as needing to be revoked */
6371 nfs_node_lock_force(np);
6372 if (np->n_flag & NREVOKE) { /* already revoked? */
6373 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6374 nfs_node_unlock(np);
6375 return;
6376 }
6377 np->n_flag |= NREVOKE;
6378 nfs_node_unlock(np);
6379
6380 nfs_release_open_state_for_node(np, 0);
6381 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6382
6383 /* mark mount as needing a revoke scan and have the socket thread do it. */
6384 if ((nmp = NFSTONMP(np))) {
6385 lck_mtx_lock(&nmp->nm_lock);
6386 nmp->nm_state |= NFSSTA_REVOKE;
6387 nfs_mount_sock_thread_wake(nmp);
6388 lck_mtx_unlock(&nmp->nm_lock);
6389 }
6390}
6391
6392#if CONFIG_NFS4
6393/*
6394 * Claim the delegated open combinations that each of this node's open files holds.
6395 */
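/*
 * Note the locking pattern: n_openlock must be dropped around each claim
 * RPC, so after a successful claim the open list is rescanned from the
 * head ("goto restart") rather than trusting a possibly-stale iterator.
 */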
6396int
6397nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6398{
6399 struct nfs_open_file *nofp;
6400 int error = 0;
6401
6402 lck_mtx_lock(&np->n_openlock);
6403
6404 /* walk the open file list looking for opens with delegated state to claim */
6405restart:
6406 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6407 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6408 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6409 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6410 continue;
6411 }
6412 lck_mtx_unlock(&np->n_openlock);
6413 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6414 lck_mtx_lock(&np->n_openlock);
6415 if (error) {
6416 break;
6417 }
6418 goto restart;
6419 }
6420
6421 lck_mtx_unlock(&np->n_openlock);
6422
6423 return error;
6424}
6425
6426/*
6427 * Mark a node as needing to have its delegation returned.
6428 * Queue it up on the delegation return queue.
6429 * Make sure the mount's socket thread is running to service it.
6430 */
6431void
6432nfs4_delegation_return_enqueue(nfsnode_t np)
6433{
6434 struct nfsmount *nmp;
6435
6436 nmp = NFSTONMP(np);
6437 if (nfs_mount_gone(nmp)) {
6438 return;
6439 }
6440
6441 lck_mtx_lock(&np->n_openlock);
6442 np->n_openflags |= N_DELEG_RETURN;
6443 lck_mtx_unlock(&np->n_openlock);
6444
6445 lck_mtx_lock(&nmp->nm_lock);
6446 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6447 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
6448 }
6449 nfs_mount_sock_thread_wake(nmp);
6450 lck_mtx_unlock(&nmp->nm_lock);
6451}
6452
6453/*
6454 * return any delegation we may have for the given node
6455 */
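/*
 * The return sequence, roughly:
 *	1. mark the node N_DELEG_RETURN|N_DELEG_RETURNING
 *	2. take the open state busy lock so no new use of the delegation starts
 *	3. claim all delegated opens/locks so no state is lost with it
 *	4. send DELEGRETURN with the delegation stateid
 *	5. unhook the node from the mount's delegation and return queues
 */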
6456int
6457nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6458{
6459 struct nfsmount *nmp;
6460 fhandle_t fh;
6461 nfs_stateid dstateid;
6462 int error;
6463
6464 nmp = NFSTONMP(np);
6465 if (nfs_mount_gone(nmp)) {
6466 return ENXIO;
6467 }
6468
6469 /* first, make sure the node's marked for delegation return */
6470 lck_mtx_lock(&np->n_openlock);
6471 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6472 lck_mtx_unlock(&np->n_openlock);
6473
6474 /* make sure nobody else is using the delegation state */
6475 if ((error = nfs_open_state_set_busy(np, NULL))) {
6476 goto out;
6477 }
6478
6479 /* claim any delegated state */
6480 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6481 goto out;
6482 }
6483
6484 /* return the delegation */
6485 lck_mtx_lock(&np->n_openlock);
6486 dstateid = np->n_dstateid;
6487 fh.fh_len = np->n_fhsize;
6488 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
6489 lck_mtx_unlock(&np->n_openlock);
6490 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
6491 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6492 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6493 lck_mtx_lock(&np->n_openlock);
6494 np->n_openflags &= ~N_DELEG_MASK;
6495 lck_mtx_lock(&nmp->nm_lock);
6496 if (np->n_dlink.tqe_next != NFSNOLIST) {
6497 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6498 np->n_dlink.tqe_next = NFSNOLIST;
6499 }
6500 lck_mtx_unlock(&nmp->nm_lock);
6501 lck_mtx_unlock(&np->n_openlock);
6502 }
6503
6504out:
6505 /* make sure it's no longer on the return queue and clear the return flags */
6506 lck_mtx_lock(&nmp->nm_lock);
6507 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6508 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6509 np->n_dreturn.tqe_next = NFSNOLIST;
6510 }
6511 lck_mtx_unlock(&nmp->nm_lock);
6512 lck_mtx_lock(&np->n_openlock);
6513 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6514 lck_mtx_unlock(&np->n_openlock);
6515
6516 if (error) {
6517 NP(np, "nfs4_delegation_return, error %d", error);
6518 if (error == ETIMEDOUT) {
6519 nfs_need_reconnect(nmp);
6520 }
6521 if (nfs_mount_state_error_should_restart(error)) {
6522 /* make sure recovery happens */
6523 lck_mtx_lock(&nmp->nm_lock);
6524 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6525 lck_mtx_unlock(&nmp->nm_lock);
6526 }
6527 }
6528
6529 nfs_open_state_clear_busy(np);
6530
6531 return error;
6532}
6533
6534/*
6535 * RPC to return a delegation for a file handle
6536 */
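/*
 * The compound is simply PUTFH(fh) + DELEGRETURN(stateid). A minimal usage
 * sketch, assuming the caller snapshotted the stateid under n_openlock the
 * way nfs4_delegation_return() does:
 *
 *	error = nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len,
 *	    &dstateid, flags, thd, cred);
 */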
6537int
6538nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6539{
6540 int error = 0, status, numops;
6541 uint64_t xid;
6542 struct nfsm_chain nmreq, nmrep;
6543 struct nfsreq_secinfo_args si;
6544
6545 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6546 nfsm_chain_null(&nmreq);
6547 nfsm_chain_null(&nmrep);
6548
6549 // PUTFH, DELEGRETURN
6550 numops = 2;
6551 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6552 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6553 numops--;
6554 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6555 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6556 numops--;
6557 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6558 nfsm_chain_add_stateid(error, &nmreq, sid);
6559 nfsm_chain_build_done(error, &nmreq);
6560 nfsm_assert(error, (numops == 0), EPROTO);
6561 nfsmout_if(error);
6562 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6563 nfsm_chain_skip_tag(error, &nmrep);
6564 nfsm_chain_get_32(error, &nmrep, numops);
6565 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6566 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6567nfsmout:
6568 nfsm_chain_cleanup(&nmreq);
6569 nfsm_chain_cleanup(&nmrep);
6570 return error;
6571}
6572#endif /* CONFIG_NFS4 */
6573
6574/*
6575 * NFS read call.
6576 * Just call nfs_bioread() to do the work.
6577 *
6578 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6579 * without first calling VNOP_OPEN, so we make sure the file is open here.
6580 */
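/*
 * In the execve case we may end up creating the open ourselves below: a
 * deny-none read open, flagged NFS_OPEN_FILE_NEEDCLOSE so that it still
 * gets closed properly even though no VNOP_OPEN was ever issued.
 */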
6581int
6582nfs_vnop_read(
6583 struct vnop_read_args /* {
6584 * struct vnodeop_desc *a_desc;
6585 * vnode_t a_vp;
6586 * struct uio *a_uio;
6587 * int a_ioflag;
6588 * vfs_context_t a_context;
6589 * } */*ap)
6590{
6591 vnode_t vp = ap->a_vp;
6592 vfs_context_t ctx = ap->a_context;
6593 nfsnode_t np;
6594 struct nfsmount *nmp;
6595 struct nfs_open_owner *noop;
6596 struct nfs_open_file *nofp;
6597 int error;
6598
6599 if (vnode_vtype(ap->a_vp) != VREG) {
6600 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6601 }
6602
6603 np = VTONFS(vp);
6604 nmp = NFSTONMP(np);
6605 if (nfs_mount_gone(nmp)) {
6606 return ENXIO;
6607 }
6608 if (np->n_flag & NREVOKE) {
6609 return EIO;
6610 }
6611
6612 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6613 if (!noop) {
6614 return ENOMEM;
6615 }
6616restart:
6617 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6618 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6619 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6620 error = EIO;
6621 }
6622#if CONFIG_NFS4
6623 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6624 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6625 nofp = NULL;
6626 if (!error) {
6627 goto restart;
6628 }
6629 }
6630#endif
6631 if (error) {
6632 nfs_open_owner_rele(noop);
6633 return error;
6634 }
6635 /*
6636 * Since the read path is a hot path, if we already have
6637 * read access, let's go ahead and try the read without
6638 * busying the mount and open file node for this open owner.
6639 *
6640 * N.B. This is inherently racy w.r.t. an execve using
6641 * an already open file, in that the read at the end of
6642 * this routine will be racing with a potential close.
6643 * The code below ultimately has the same problem. In practice
6644 * this does not seem to be an issue.
6645 */
6646 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6647 nfs_open_owner_rele(noop);
6648 goto do_read;
6649 }
6650 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6651 if (error) {
6652 nfs_open_owner_rele(noop);
6653 return error;
6654 }
6655 /*
6656 * If we don't already have the file open with the access we need (read),
6657 * then we need to open it. Otherwise we just co-opt an existing open. We might not already
6658 * have access because we're trying to read the first page of the
6659 * file for execve.
6660 */
6661 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6662 if (error) {
6663 nfs_mount_state_in_use_end(nmp, 0);
6664 nfs_open_owner_rele(noop);
6665 return error;
6666 }
6667 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6668 /* we don't have the file open, so open it for read access if we're not denied */
6669 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6670 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6671 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6672 }
6673 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6674 nfs_open_file_clear_busy(nofp);
6675 nfs_mount_state_in_use_end(nmp, 0);
6676 nfs_open_owner_rele(noop);
6677 return EPERM;
6678 }
6679 if (np->n_flag & NREVOKE) {
6680 error = EIO;
6681 nfs_open_file_clear_busy(nofp);
6682 nfs_mount_state_in_use_end(nmp, 0);
6683 nfs_open_owner_rele(noop);
6684 return error;
6685 }
6686 if (nmp->nm_vers < NFS_VER4) {
6687 /* NFS v2/v3 opens are always allowed - so just add it. */
6688 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6689 }
6690#if CONFIG_NFS4
6691 else {
6692 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6693 }
6694#endif
6695 if (!error) {
6696 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6697 }
6698 }
6699 if (nofp) {
6700 nfs_open_file_clear_busy(nofp);
6701 }
6702 if (nfs_mount_state_in_use_end(nmp, error)) {
6703 nofp = NULL;
6704 goto restart;
6705 }
6706 nfs_open_owner_rele(noop);
6707 if (error) {
6708 return error;
6709 }
6710do_read:
6711 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6712}
6713
6714#if CONFIG_NFS4
6715/*
6716 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6717 * Files are created using the NFSv4 OPEN RPC. So we must open the
6718 * file to create it and then close it.
6719 */
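/*
 * The create-open below is taken with ACCESS_BOTH/DENY_NONE; the resulting
 * open file is flagged NFS_OPEN_FILE_CREATE with nof_creator recording the
 * creating thread, so later open/close processing can recognize (and
 * eventually close) the open that the create initiated.
 */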
6720int
6721nfs4_vnop_create(
6722 struct vnop_create_args /* {
6723 * struct vnodeop_desc *a_desc;
6724 * vnode_t a_dvp;
6725 * vnode_t *a_vpp;
6726 * struct componentname *a_cnp;
6727 * struct vnode_attr *a_vap;
6728 * vfs_context_t a_context;
6729 * } */*ap)
6730{
6731 vfs_context_t ctx = ap->a_context;
6732 struct componentname *cnp = ap->a_cnp;
6733 struct vnode_attr *vap = ap->a_vap;
6734 vnode_t dvp = ap->a_dvp;
6735 vnode_t *vpp = ap->a_vpp;
6736 struct nfsmount *nmp;
6737 nfsnode_t np;
6738 int error = 0, busyerror = 0, accessMode, denyMode;
6739 struct nfs_open_owner *noop = NULL;
6740 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6741
6742 nmp = VTONMP(dvp);
6743 if (nfs_mount_gone(nmp)) {
6744 return ENXIO;
6745 }
6746
6747 if (vap) {
6748 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6749 }
6750
6751 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6752 if (!noop) {
6753 return ENOMEM;
6754 }
6755
6756restart:
6757 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6758 if (error) {
6759 nfs_open_owner_rele(noop);
6760 return error;
6761 }
6762
6763 /* grab a provisional, nodeless open file */
6764 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6765 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6766 printf("nfs_vnop_create: LOST\n");
6767 error = EIO;
6768 }
6769 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6770 /* This shouldn't happen given that this is a new, nodeless nofp */
6771 nfs_mount_state_in_use_end(nmp, 0);
6772 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6773 nfs_open_file_destroy(newnofp);
6774 newnofp = NULL;
6775 if (!error) {
6776 goto restart;
6777 }
6778 }
6779 if (!error) {
6780 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6781 }
6782 if (error) {
6783 if (newnofp) {
6784 nfs_open_file_destroy(newnofp);
6785 }
6786 newnofp = NULL;
6787 goto out;
6788 }
6789
6790 /*
6791 * We're just trying to create the file.
6792 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6793 */
6794 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6795 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6796
6797 /* Do the open/create */
6798 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6799 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6800 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6801 /*
6802 * Hmm... it looks like the request may have been retransmitted: we never
6803 * saw the first response, which successfully created/opened the file,
6804 * and the retried open was then denied because the mode the file was
6805 * created with doesn't allow write access.
6806 *
6807 * We'll try to work around this by temporarily updating the mode and
6808 * retrying the open.
6809 */
6810 struct vnode_attr vattr;
6811
6812 /* first make sure it's there */
6813 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6814 if (!error2 && np) {
6815 nfs_node_unlock(np);
6816 *vpp = NFSTOV(np);
6817 if (vnode_vtype(NFSTOV(np)) == VREG) {
6818 VATTR_INIT(&vattr);
6819 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6820 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6821 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6822 VATTR_INIT(&vattr);
6823 VATTR_SET(&vattr, va_mode, vap->va_mode);
6824 nfs4_setattr_rpc(np, &vattr, ctx);
6825 if (!error2) {
6826 error = 0;
6827 }
6828 }
6829 }
6830 if (error) {
6831 vnode_put(*vpp);
6832 *vpp = NULL;
6833 }
6834 }
6835 }
6836 if (!error && !*vpp) {
6837 printf("nfs4_open_rpc returned without a node?\n");
6838 /* Hmmm... with no node, we have no filehandle and can't close it */
6839 error = EIO;
6840 }
6841 if (error) {
6842 /* need to cleanup our temporary nofp */
6843 nfs_open_file_clear_busy(newnofp);
6844 nfs_open_file_destroy(newnofp);
6845 newnofp = NULL;
6846 goto out;
6847 }
6848 /* After we have a node, add our open file struct to the node */
6849 np = VTONFS(*vpp);
6850 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6851 nofp = newnofp;
6852 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6853 if (error) {
6854 /* This shouldn't happen, because we passed in a new nofp to use. */
6855 printf("nfs_open_file_find_internal failed! %d\n", error);
6856 goto out;
6857 } else if (nofp != newnofp) {
6858 /*
6859 * Hmm... an open file struct already exists.
6860 * Mark the existing one busy and merge our open into it.
6861 * Then destroy the one we created.
6862 * Note: there's no chance of an open conflict because the
6863 * open has already been granted.
6864 */
6865 busyerror = nfs_open_file_set_busy(nofp, NULL);
6866 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6867 nofp->nof_stateid = newnofp->nof_stateid;
6868 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6869 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6870 }
6871 nfs_open_file_clear_busy(newnofp);
6872 nfs_open_file_destroy(newnofp);
6873 }
6874 newnofp = NULL;
6875 /* mark the node as holding a create-initiated open */
6876 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6877 nofp->nof_creator = current_thread();
6878out:
6879 if (nofp && !busyerror) {
6880 nfs_open_file_clear_busy(nofp);
6881 }
6882 if (nfs_mount_state_in_use_end(nmp, error)) {
6883 nofp = newnofp = NULL;
6884 busyerror = 0;
6885 goto restart;
6886 }
6887 if (noop) {
6888 nfs_open_owner_rele(noop);
6889 }
6890 return error;
6891}
6892
6893/*
6894 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6895 */
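/*
 * The CREATE arguments vary with the object type (a sketch of the
 * createtype4 union encoded below):
 *
 *	NFLNK               -> linkdata (the symlink target)
 *	NFBLK/NFCHR         -> specdata1/specdata2 (major/minor device numbers)
 *	NFSOCK/NFFIFO/NFDIR -> no extra data
 *
 * followed in all cases by the new component name and the initial fattr4
 * attributes built from *vap.
 */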
6896int
6897nfs4_create_rpc(
6898 vfs_context_t ctx,
6899 nfsnode_t dnp,
6900 struct componentname *cnp,
6901 struct vnode_attr *vap,
6902 int type,
6903 char *link,
6904 nfsnode_t *npp)
6905{
6906 struct nfsmount *nmp;
6907 struct nfs_vattr nvattr;
6908 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6909 int nfsvers, namedattrs, numops;
6910 u_int64_t xid, savedxid = 0;
6911 nfsnode_t np = NULL;
6912 vnode_t newvp = NULL;
6913 struct nfsm_chain nmreq, nmrep;
6914 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6915 const char *tag;
6916 nfs_specdata sd;
6917 fhandle_t fh;
6918 struct nfsreq rq, *req = &rq;
6919 struct nfs_dulookup dul;
6920 struct nfsreq_secinfo_args si;
6921
6922 nmp = NFSTONMP(dnp);
6923 if (nfs_mount_gone(nmp)) {
6924 return ENXIO;
6925 }
6926 nfsvers = nmp->nm_vers;
6927 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6928 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
6929 return EINVAL;
6930 }
6931
6932 sd.specdata1 = sd.specdata2 = 0;
6933
6934 switch (type) {
6935 case NFLNK:
6936 tag = "symlink";
6937 break;
6938 case NFBLK:
6939 case NFCHR:
6940 tag = "mknod";
6941 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
6942 return EINVAL;
6943 }
6944 sd.specdata1 = major(vap->va_rdev);
6945 sd.specdata2 = minor(vap->va_rdev);
6946 break;
6947 case NFSOCK:
6948 case NFFIFO:
6949 tag = "mknod";
6950 break;
6951 case NFDIR:
6952 tag = "mkdir";
6953 break;
6954 default:
6955 return EINVAL;
6956 }
6957
6958 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6959
6960 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6961 if (!namedattrs) {
6962 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6963 }
6964
6965 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6966 NVATTR_INIT(&nvattr);
6967 nfsm_chain_null(&nmreq);
6968 nfsm_chain_null(&nmrep);
6969
6970 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6971 numops = 6;
6972 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6973 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6974 numops--;
6975 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6976 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6977 numops--;
6978 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6979 numops--;
6980 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6981 nfsm_chain_add_32(error, &nmreq, type);
6982 if (type == NFLNK) {
6983 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6984 } else if ((type == NFBLK) || (type == NFCHR)) {
6985 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6986 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6987 }
6988 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6989 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6990 numops--;
6991 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6992 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6993 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6994 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6995 numops--;
6996 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6997 numops--;
6998 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6999 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7000 nfsm_chain_build_done(error, &nmreq);
7001 nfsm_assert(error, (numops == 0), EPROTO);
7002 nfsmout_if(error);
7003
7004 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
7005 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7006 if (!error) {
7007 if (!namedattrs) {
7008 nfs_dulookup_start(&dul, dnp, ctx);
7009 }
7010 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7011 }
7012
7013 if ((lockerror = nfs_node_lock(dnp))) {
7014 error = lockerror;
7015 }
7016 nfsm_chain_skip_tag(error, &nmrep);
7017 nfsm_chain_get_32(error, &nmrep, numops);
7018 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7019 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7020 nfsmout_if(error);
7021 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7022 nfsm_chain_check_change_info(error, &nmrep, dnp);
7023 bmlen = NFS_ATTR_BITMAP_LEN;
7024 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7025 /* At this point if we have no error, the object was created. */
7026 /* If we don't get attributes back, we fall back to nfs_lookitup() to fetch them. */
7027 create_error = error;
7028 nfsmout_if(error);
7029 nfs_vattr_set_supported(bitmap, vap);
7030 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7031 nfsmout_if(error);
7032 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7033 nfsmout_if(error);
7034 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7035 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7036 error = EBADRPC;
7037 goto nfsmout;
7038 }
7039 /* directory attributes: if we don't get them, make sure to invalidate */
7040 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7041 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7042 savedxid = xid;
7043 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
7044 if (error) {
7045 NATTRINVALIDATE(dnp);
7046 }
7047
7048nfsmout:
7049 nfsm_chain_cleanup(&nmreq);
7050 nfsm_chain_cleanup(&nmrep);
7051
7052 if (!lockerror) {
7053 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7054 dnp->n_flag &= ~NNEGNCENTRIES;
7055 cache_purge_negatives(NFSTOV(dnp));
7056 }
7057 dnp->n_flag |= NMODIFIED;
7058 nfs_node_unlock(dnp);
7059 /* nfs_getattr() will check changed and purge caches */
7060 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7061 }
7062
7063 if (!error && fh.fh_len) {
7064 /* create the vnode with the filehandle and attributes */
7065 xid = savedxid;
7066 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
7067 if (!error) {
7068 newvp = NFSTOV(np);
7069 }
7070 }
7071 NVATTR_CLEANUP(&nvattr);
7072
7073 if (!namedattrs) {
7074 nfs_dulookup_finish(&dul, dnp, ctx);
7075 }
7076
7077 /*
7078 * Kludge: Map EEXIST => 0, assuming the EEXIST is the reply to a retried
7079 * request, as long as we can successfully look up the object.
7080 */
7081 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7082 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7083 if (!error) {
7084 newvp = NFSTOV(np);
7085 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
7086 error = EEXIST;
7087 }
7088 }
7089 }
7090 if (!busyerror) {
7091 nfs_node_clear_busy(dnp);
7092 }
7093 if (error) {
7094 if (newvp) {
7095 nfs_node_unlock(np);
7096 vnode_put(newvp);
7097 }
7098 } else {
7099 nfs_node_unlock(np);
7100 *npp = np;
7101 }
7102 return error;
7103}
7104
7105int
7106nfs4_vnop_mknod(
7107 struct vnop_mknod_args /* {
7108 * struct vnodeop_desc *a_desc;
7109 * vnode_t a_dvp;
7110 * vnode_t *a_vpp;
7111 * struct componentname *a_cnp;
7112 * struct vnode_attr *a_vap;
7113 * vfs_context_t a_context;
7114 * } */*ap)
7115{
7116 nfsnode_t np = NULL;
7117 struct nfsmount *nmp;
7118 int error;
7119
7120 nmp = VTONMP(ap->a_dvp);
7121 if (nfs_mount_gone(nmp)) {
7122 return ENXIO;
7123 }
7124
7125 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7126 return EINVAL;
7127 }
7128 switch (ap->a_vap->va_type) {
7129 case VBLK:
7130 case VCHR:
7131 case VFIFO:
7132 case VSOCK:
7133 break;
7134 default:
7135 return ENOTSUP;
7136 }
7137
7138 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7139 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7140 if (!error) {
7141 *ap->a_vpp = NFSTOV(np);
7142 }
7143 return error;
7144}
7145
7146int
7147nfs4_vnop_mkdir(
7148 struct vnop_mkdir_args /* {
7149 * struct vnodeop_desc *a_desc;
7150 * vnode_t a_dvp;
7151 * vnode_t *a_vpp;
7152 * struct componentname *a_cnp;
7153 * struct vnode_attr *a_vap;
7154 * vfs_context_t a_context;
7155 * } */*ap)
7156{
7157 nfsnode_t np = NULL;
7158 int error;
7159
7160 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7161 NFDIR, NULL, &np);
7162 if (!error) {
7163 *ap->a_vpp = NFSTOV(np);
7164 }
7165 return error;
7166}
7167
7168int
7169nfs4_vnop_symlink(
7170 struct vnop_symlink_args /* {
7171 * struct vnodeop_desc *a_desc;
7172 * vnode_t a_dvp;
7173 * vnode_t *a_vpp;
7174 * struct componentname *a_cnp;
7175 * struct vnode_attr *a_vap;
7176 * char *a_target;
7177 * vfs_context_t a_context;
7178 * } */*ap)
7179{
7180 nfsnode_t np = NULL;
7181 int error;
7182
7183 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7184 NFLNK, ap->a_target, &np);
7185 if (!error) {
7186 *ap->a_vpp = NFSTOV(np);
7187 }
7188 return error;
7189}
7190
7191int
7192nfs4_vnop_link(
7193 struct vnop_link_args /* {
7194 * struct vnodeop_desc *a_desc;
7195 * vnode_t a_vp;
7196 * vnode_t a_tdvp;
7197 * struct componentname *a_cnp;
7198 * vfs_context_t a_context;
7199 * } */*ap)
7200{
7201 vfs_context_t ctx = ap->a_context;
7202 vnode_t vp = ap->a_vp;
7203 vnode_t tdvp = ap->a_tdvp;
7204 struct componentname *cnp = ap->a_cnp;
7205 int error = 0, lockerror = ENOENT, status;
7206 struct nfsmount *nmp;
7207 nfsnode_t np = VTONFS(vp);
7208 nfsnode_t tdnp = VTONFS(tdvp);
7209 int nfsvers, numops;
7210 u_int64_t xid, savedxid;
7211 struct nfsm_chain nmreq, nmrep;
7212 struct nfsreq_secinfo_args si;
7213
7214 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7215 return EXDEV;
7216 }
7217
7218 nmp = VTONMP(vp);
7219 if (nfs_mount_gone(nmp)) {
7220 return ENXIO;
7221 }
7222 nfsvers = nmp->nm_vers;
7223 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7224 return EINVAL;
7225 }
7226 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7227 return EINVAL;
7228 }
7229
7230 /*
7231 * Push all writes to the server, so that the attribute cache
7232 * doesn't get "out of sync" with the server.
7233 * XXX There should be a better way!
7234 */
7235 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7236
7237 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7238 return error;
7239 }
7240
7241 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7242 nfsm_chain_null(&nmreq);
7243 nfsm_chain_null(&nmrep);
7244
7245 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7246 numops = 7;
7247 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
7248 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
7249 numops--;
7250 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7251 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7252 numops--;
7253 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7254 numops--;
7255 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7256 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7257 numops--;
7258 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7259 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7260 numops--;
7261 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7262 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7263 numops--;
7264 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7265 numops--;
7266 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7267 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7268 nfsm_chain_build_done(error, &nmreq);
7269 nfsm_assert(error, (numops == 0), EPROTO);
7270 nfsmout_if(error);
7271 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7272
7273 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7274 error = lockerror;
7275 goto nfsmout;
7276 }
7277 nfsm_chain_skip_tag(error, &nmrep);
7278 nfsm_chain_get_32(error, &nmrep, numops);
7279 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7280 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7281 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7282 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7283 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7284 /* directory attributes: if we don't get them, make sure to invalidate */
7285 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7286 savedxid = xid;
7287 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
7288 if (error) {
7289 NATTRINVALIDATE(tdnp);
7290 }
7291 /* link attributes: if we don't get them, make sure to invalidate */
7292 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7293 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7294 xid = savedxid;
7295 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
7296 if (error) {
7297 NATTRINVALIDATE(np);
7298 }
7299nfsmout:
7300 nfsm_chain_cleanup(&nmreq);
7301 nfsm_chain_cleanup(&nmrep);
7302 if (!lockerror) {
7303 tdnp->n_flag |= NMODIFIED;
7304 }
7305 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7306 if (error == EEXIST) {
7307 error = 0;
7308 }
7309 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7310 tdnp->n_flag &= ~NNEGNCENTRIES;
7311 cache_purge_negatives(tdvp);
7312 }
7313 if (!lockerror) {
7314 nfs_node_unlock2(tdnp, np);
7315 }
7316 nfs_node_clear_busy2(tdnp, np);
7317 return error;
7318}
7319
7320int
7321nfs4_vnop_rmdir(
7322 struct vnop_rmdir_args /* {
7323 * struct vnodeop_desc *a_desc;
7324 * vnode_t a_dvp;
7325 * vnode_t a_vp;
7326 * struct componentname *a_cnp;
7327 * vfs_context_t a_context;
7328 * } */*ap)
7329{
7330 vfs_context_t ctx = ap->a_context;
7331 vnode_t vp = ap->a_vp;
7332 vnode_t dvp = ap->a_dvp;
7333 struct componentname *cnp = ap->a_cnp;
7334 struct nfsmount *nmp;
7335 int error = 0, namedattrs;
7336 nfsnode_t np = VTONFS(vp);
7337 nfsnode_t dnp = VTONFS(dvp);
7338 struct nfs_dulookup dul;
7339
7340 if (vnode_vtype(vp) != VDIR) {
7341 return EINVAL;
7342 }
7343
7344 nmp = NFSTONMP(dnp);
7345 if (nfs_mount_gone(nmp)) {
7346 return ENXIO;
7347 }
7348 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7349
7350 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7351 return error;
7352 }
7353
7354 if (!namedattrs) {
7355 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7356 nfs_dulookup_start(&dul, dnp, ctx);
7357 }
7358
7359 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
7360 vfs_context_thread(ctx), vfs_context_ucred(ctx));
7361
7362 nfs_name_cache_purge(dnp, np, cnp, ctx);
7363 /* nfs_getattr() will check changed and purge caches */
7364 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7365 if (!namedattrs) {
7366 nfs_dulookup_finish(&dul, dnp, ctx);
7367 }
7368 nfs_node_clear_busy2(dnp, np);
7369
7370 /*
7371 * Kludge: Map ENOENT => 0, assuming it is the reply to a retried request.
7372 */
7373 if (error == ENOENT) {
7374 error = 0;
7375 }
7376 if (!error) {
7377 /*
7378 * remove nfsnode from hash now so we can't accidentally find it
7379 * again if another object gets created with the same filehandle
7380 * before this vnode gets reclaimed
7381 */
7382 lck_mtx_lock(nfs_node_hash_mutex);
7383 if (np->n_hflag & NHHASHED) {
7384 LIST_REMOVE(np, n_hash);
7385 np->n_hflag &= ~NHHASHED;
7386 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7387 }
7388 lck_mtx_unlock(nfs_node_hash_mutex);
7389 }
7390 return error;
7391}
7392
7393/*
7394 * NFSv4 Named Attributes
7395 *
7396 * Both the extended attributes interface and the named streams interface
7397 * are backed by NFSv4 named attributes. The implementations for both use
7398 * a common set of routines in an attempt to reduce code duplication, to
7399 * increase efficiency, to increase caching of both names and data, and to
7400 * confine the complexity.
7401 *
7402 * Each NFS node caches its named attribute directory's file handle.
7403 * The directory nodes for the named attribute directories are handled
7404 * exactly like regular directories (with a couple of minor exceptions).
7405 * Named attribute nodes are also treated as much like regular files as
7406 * possible.
7407 *
7408 * Most of the heavy lifting is done by nfs4_named_attr_get().
7409 */
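/*
 * As a rough sketch of the wire traffic involved (based on the compound
 * requests built below; illustrative, not normative):
 *
 *   PUTFH(file), OPENATTR, GETATTR(FILEHANDLE)    -> attrdir filehandle
 *   PUTFH(attrdir), LOOKUP/OPEN(name), GETATTR    -> named attribute node
 *
 * The first compound is skipped when the attrdir filehandle is already
 * cached in the node, and nfs4_named_attr_get() can fold both steps (plus
 * an optional READ prefetch) into a single compound request.
 */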
7410
7411/*
7412 * Get the given node's attribute directory node.
7413 * If !fetch, then only return a cached node.
7414 * Otherwise, we will attempt to fetch the node from the server.
7415 * (Note: the node should be marked busy.)
7416 */
7417nfsnode_t
7418nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
7419{
7420 nfsnode_t adnp = NULL;
7421 struct nfsmount *nmp;
7422 int error = 0, status, numops;
7423 struct nfsm_chain nmreq, nmrep;
7424 u_int64_t xid;
7425 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
7426 fhandle_t fh;
7427 struct nfs_vattr nvattr;
7428 struct componentname cn;
7429 struct nfsreq rq, *req = &rq;
7430 struct nfsreq_secinfo_args si;
7431
7432 nmp = NFSTONMP(np);
7433 if (nfs_mount_gone(nmp)) {
7434 return NULL;
7435 }
7436 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7437 return NULL;
7438 }
7439
7440 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7441 NVATTR_INIT(&nvattr);
7442 nfsm_chain_null(&nmreq);
7443 nfsm_chain_null(&nmrep);
7444
7445 bzero(&cn, sizeof(cn));
7446 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7447 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7448 cn.cn_nameiop = LOOKUP;
7449
7450 if (np->n_attrdirfh) {
7451 // XXX can't set parent correctly (to np) yet
7452 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7453 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7454 if (adnp) {
7455 goto nfsmout;
7456 }
7457 }
7458 if (!fetch) {
7459 error = ENOENT;
7460 goto nfsmout;
7461 }
7462
7463 // PUTFH, OPENATTR, GETATTR
7464 numops = 3;
7465 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
7466 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
7467 numops--;
7468 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7469 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7470 numops--;
7471 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7472 nfsm_chain_add_32(error, &nmreq, 0);
7473 numops--;
7474 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7475 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7476 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7477 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7478 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7479 nfsm_chain_build_done(error, &nmreq);
7480 nfsm_assert(error, (numops == 0), EPROTO);
7481 nfsmout_if(error);
7482 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
7483 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7484 if (!error) {
7485 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7486 }
7487
7488 nfsm_chain_skip_tag(error, &nmrep);
7489 nfsm_chain_get_32(error, &nmrep, numops);
7490 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7491 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7492 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7493 nfsmout_if(error);
7494 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7495 nfsmout_if(error);
7496 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7497 error = ENOENT;
7498 goto nfsmout;
7499 }
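	/*
	 * n_attrdirfh stores the attrdir filehandle with a one-byte length
	 * prefix: byte 0 holds fh_len and the handle data follows, which is
	 * why the copies below (and in nfs4_named_attr_get()) use
	 * n_attrdirfh + 1.
	 */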
7500 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7501 /* (re)allocate attrdir fh buffer */
7502 if (np->n_attrdirfh) {
7503 FREE(np->n_attrdirfh, M_TEMP);
7504 }
7505 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7506 }
7507 if (!np->n_attrdirfh) {
7508 error = ENOMEM;
7509 goto nfsmout;
7510 }
7511 /* cache the attrdir fh in the node */
7512 *np->n_attrdirfh = fh.fh_len;
7513 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7514 /* create node for attrdir */
7515 // XXX can't set parent correctly (to np) yet
7516 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7517nfsmout:
7518 NVATTR_CLEANUP(&nvattr);
7519 nfsm_chain_cleanup(&nmreq);
7520 nfsm_chain_cleanup(&nmrep);
7521
7522 if (adnp) {
7523 /* sanity check that this node is an attribute directory */
7524 if (adnp->n_vattr.nva_type != VDIR) {
7525 error = EINVAL;
7526 }
7527 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
7528 error = EINVAL;
7529 }
7530 nfs_node_unlock(adnp);
7531 if (error) {
7532 vnode_put(NFSTOV(adnp));
7533 }
7534 }
7535 return error ? NULL : adnp;
7536}
7537
7538/*
7539 * Get the given node's named attribute node for the name given.
7540 *
7541 * In an effort to increase the performance of named attribute access, we try
7542 * to reduce server requests by doing the following:
7543 *
7544 * - cache the node's named attribute directory file handle in the node
7545 * - maintain a directory vnode for the attribute directory
7546 * - use name cache entries (positive and negative) to speed up lookups
7547 * - optionally open the named attribute (with the given accessMode) in the same RPC
7548 * - combine attribute directory retrieval with the lookup/open RPC
7549 * - optionally prefetch the named attribute's first block of data in the same RPC
7550 *
7551 * Also, in an attempt to reduce the number of copies/variations of this code,
7552 * parts of the RPC building/processing code are conditionalized on what is
7553 * needed for any particular request (openattr, lookup vs. open, read).
7554 *
7555 * Note that because we may not have the attribute directory node when we start
7556 * the lookup/open, we lock both the node and the attribute directory node.
7557 */
7558
7559#define NFS_GET_NAMED_ATTR_CREATE 0x1
7560#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7561#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7562#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
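/*
 * For reference, how the callers below combine these flags (a summary of
 * this file's xattr/named-stream entry points, not an exhaustive matrix):
 *
 *   getxattr:        PREFETCH (except for the resource fork)
 *   setxattr:        CREATE unless XATTR_REPLACE, plus CREATE_GUARDED for
 *                    XATTR_CREATE, plus TRUNCATE (except for the resource fork)
 *   makenamedstream: CREATE
 */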
7563
7564int
7565nfs4_named_attr_get(
7566 nfsnode_t np,
7567 struct componentname *cnp,
7568 uint32_t accessMode,
7569 int flags,
7570 vfs_context_t ctx,
7571 nfsnode_t *anpp,
7572 struct nfs_open_file **nofpp)
7573{
7574 struct nfsmount *nmp;
7575 int error = 0, open_error = EIO;
7576 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7577 int create, guarded, prefetch, truncate, noopbusy = 0;
7578 int open, status, numops, hadattrdir, negnamecache;
7579 struct nfs_vattr nvattr;
7580 struct vnode_attr vattr;
7581 nfsnode_t adnp = NULL, anp = NULL;
7582 vnode_t avp = NULL;
7583 u_int64_t xid, savedxid = 0;
7584 struct nfsm_chain nmreq, nmrep;
7585 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7586 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7587 nfs_stateid stateid, dstateid;
7588 fhandle_t fh;
7589 struct nfs_open_owner *noop = NULL;
7590 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7591 struct vnop_access_args naa;
7592 thread_t thd;
7593 kauth_cred_t cred;
7594 struct timeval now;
7595 char sbuf[64], *s;
7596 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7597 struct kauth_ace ace;
7598 struct nfsreq rq, *req = &rq;
7599 struct nfsreq_secinfo_args si;
7600
7601 *anpp = NULL;
7602 fh.fh_len = 0;
7603 rflags = delegation = recall = eof = rlen = retlen = 0;
7604 ace.ace_flags = 0;
7605 s = sbuf;
7606 slen = sizeof(sbuf);
7607
7608 nmp = NFSTONMP(np);
7609 if (nfs_mount_gone(nmp)) {
7610 return ENXIO;
7611 }
7612 NVATTR_INIT(&nvattr);
7613 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7614 thd = vfs_context_thread(ctx);
7615 cred = vfs_context_ucred(ctx);
7616 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7617 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7618 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7619 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7620
7621 if (!create) {
7622 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7623 if (error) {
7624 return error;
7625 }
7626 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7627 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7628 return ENOATTR;
7629 }
7630 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7631 /* shouldn't happen... but just be safe */
7632 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7633 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7634 }
7635 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7636 if (open) {
7637 /*
7638 * We're trying to open the file.
7639 * We'll create/open it with the given access mode,
7640 * and set NFS_OPEN_FILE_CREATE.
7641 */
7642 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7643 if (prefetch && guarded) {
7644 prefetch = 0; /* no sense prefetching data that can't be there */
7645 }
7646 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7647 if (!noop) {
7648 return ENOMEM;
7649 }
7650 }
7651
7652 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7653 return error;
7654 }
7655
7656 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7657 hadattrdir = (adnp != NULL);
7658 if (prefetch) {
7659 microuptime(&now);
7660 /* use the special state ID because we don't have a real one to send */
7661 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7662 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7663 }
7664 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7665 nfsm_chain_null(&nmreq);
7666 nfsm_chain_null(&nmrep);
7667
7668 if (hadattrdir) {
7669 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7670 goto nfsmout;
7671 }
7672 /* nfs_getattr() will check changed and purge caches */
7673 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7674 nfsmout_if(error);
7675 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7676 switch (error) {
7677 case ENOENT:
7678 /* negative cache entry */
7679 goto nfsmout;
7680 case 0:
7681 /* cache miss */
7682 /* try dir buf cache lookup */
7683 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7684 if (!error && anp) {
7685 /* dir buf cache hit */
7686 *anpp = anp;
7687 error = -1;
7688 }
7689 if (error != -1) { /* cache miss */
7690 break;
7691 }
7692 /* FALLTHROUGH */
7693 case -1:
7694 /* cache hit, not really an error */
7695 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7696 if (!anp && avp) {
7697 *anpp = anp = VTONFS(avp);
7698 }
7699
7700 nfs_node_clear_busy(adnp);
7701 adbusyerror = ENOENT;
7702
7703 /* check for directory access */
7704 naa.a_desc = &vnop_access_desc;
7705 naa.a_vp = NFSTOV(adnp);
7706 naa.a_action = KAUTH_VNODE_SEARCH;
7707 naa.a_context = ctx;
7708
7709 /* compute actual success/failure based on accessibility */
7710 error = nfs_vnop_access(&naa);
7711 /* FALLTHROUGH */
7712 default:
7713 /* we either found it, or hit an error */
7714 if (!error && guarded) {
7715 /* found cached entry but told not to use it */
7716 error = EEXIST;
7717 vnode_put(NFSTOV(anp));
7718 *anpp = anp = NULL;
7719 }
7720 /* we're done if error or we don't need to open */
7721 if (error || !open) {
7722 goto nfsmout;
7723 }
7724 /* no error and we need to open... */
7725 }
7726 }
7727
7728 if (open) {
7729restart:
7730 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7731 if (error) {
7732 nfs_open_owner_rele(noop);
7733 noop = NULL;
7734 goto nfsmout;
7735 }
7736 inuse = 1;
7737
7738 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7739 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7740 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7741 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7742 error = EIO;
7743 }
7744 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7745 nfs_mount_state_in_use_end(nmp, 0);
7746 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7747 nfs_open_file_destroy(newnofp);
7748 newnofp = NULL;
7749 if (!error) {
7750 goto restart;
7751 }
7752 }
7753 if (!error) {
7754 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7755 }
7756 if (error) {
7757 if (newnofp) {
7758 nfs_open_file_destroy(newnofp);
7759 }
7760 newnofp = NULL;
7761 goto nfsmout;
7762 }
7763 if (anp) {
7764 /*
7765 * We already have the node. So we just need to open
7766 * it - which we may be able to do with a delegation.
7767 */
7768 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7769 if (!error) {
7770 /* open succeeded, so our open file is no longer temporary */
7771 nofp = newnofp;
7772 nofpbusyerror = 0;
7773 newnofp = NULL;
7774 if (nofpp) {
7775 *nofpp = nofp;
7776 }
7777 }
7778 goto nfsmout;
7779 }
7780 }
7781
7782 /*
7783 * We either don't have the attrdir or we didn't find the attribute
7784 * in the name cache, so we need to talk to the server.
7785 *
7786 * If we don't have the attrdir, we'll need to ask the server for that too.
7787 * If the caller is requesting that the attribute be created, we need to
7788 * make sure the attrdir is created.
7789 * The caller may also request that the first block of an existing attribute
7790 * be retrieved at the same time.
7791 */
7792
7793 if (open) {
7794 /* need to mark the open owner busy during the RPC */
7795 if ((error = nfs_open_owner_set_busy(noop, thd))) {
7796 goto nfsmout;
7797 }
7798 noopbusy = 1;
7799 }
7800
7801 /*
7802 * We'd like to get updated post-open/lookup attributes for the
7803 * directory and we may also want to prefetch some data via READ.
7804 * We'd like the READ results to be last so that we can leave the
7805 * data in the mbufs until the end.
7806 *
7807 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7808 */
7809 numops = 5;
7810 if (!hadattrdir) {
7811 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7812 }
7813 if (prefetch) {
7814 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7815 }
7816 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7817 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7818 if (hadattrdir) {
7819 numops--;
7820 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7821 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7822 } else {
7823 numops--;
7824 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7825 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7826 numops--;
7827 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7828 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7829 numops--;
7830 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7831 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7832 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7833 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7834 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7835 }
7836 if (open) {
7837 numops--;
7838 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7839 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7840 nfsm_chain_add_32(error, &nmreq, accessMode);
7841 nfsm_chain_add_32(error, &nmreq, denyMode);
7842 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7843 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7844 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7845 nfsm_chain_add_32(error, &nmreq, create);
7846 if (create) {
7847 nfsm_chain_add_32(error, &nmreq, guarded);
7848 VATTR_INIT(&vattr);
7849 if (truncate) {
7850 VATTR_SET(&vattr, va_data_size, 0);
7851 }
7852 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7853 }
7854 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7855 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7856 } else {
7857 numops--;
7858 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7859 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7860 }
7861 numops--;
7862 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7863 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7864 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7865 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7866 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7867 if (prefetch) {
7868 numops--;
7869 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7870 }
7871 if (hadattrdir) {
7872 numops--;
7873 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7874 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7875 } else {
7876 numops--;
7877 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7878 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7879 numops--;
7880 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7881 nfsm_chain_add_32(error, &nmreq, 0);
7882 }
7883 numops--;
7884 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7885 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7886 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7887 if (prefetch) {
7888 numops--;
7889 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7890 numops--;
7891 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7892 VATTR_INIT(&vattr);
7893 VATTR_SET(&vattr, va_data_size, 0);
7894 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7895 numops--;
7896 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7897 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7898 nfsm_chain_add_64(error, &nmreq, 0);
7899 nfsm_chain_add_32(error, &nmreq, rlen);
7900 }
7901 nfsm_chain_build_done(error, &nmreq);
7902 nfsm_assert(error, (numops == 0), EPROTO);
7903 nfsmout_if(error);
7904 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7905 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7906 if (!error) {
7907 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7908 }
7909
7910 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
7911 error = adlockerror;
7912 }
7913 savedxid = xid;
7914 nfsm_chain_skip_tag(error, &nmrep);
7915 nfsm_chain_get_32(error, &nmrep, numops);
7916 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7917 if (!hadattrdir) {
7918 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7919 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7920 nfsmout_if(error);
7921 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7922 nfsmout_if(error);
7923 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7924 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7925 /* (re)allocate attrdir fh buffer */
7926 if (np->n_attrdirfh) {
7927 FREE(np->n_attrdirfh, M_TEMP);
7928 }
7929 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7930 }
7931 if (np->n_attrdirfh) {
7932 /* remember the attrdir fh in the node */
7933 *np->n_attrdirfh = fh.fh_len;
7934 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7935 /* create busied node for attrdir */
7936 struct componentname cn;
7937 bzero(&cn, sizeof(cn));
7938 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7939 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7940 cn.cn_nameiop = LOOKUP;
7941 // XXX can't set parent correctly (to np) yet
7942 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7943 if (!error) {
7944 adlockerror = 0;
7945 /* set the node busy */
7946 SET(adnp->n_flag, NBUSY);
7947 adbusyerror = 0;
7948 }
7949 /* if no adnp, oh well... */
7950 error = 0;
7951 }
7952 }
7953 NVATTR_CLEANUP(&nvattr);
7954 fh.fh_len = 0;
7955 }
7956 if (open) {
7957 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7958 nfs_owner_seqid_increment(noop, NULL, error);
7959 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7960 nfsm_chain_check_change_info(error, &nmrep, adnp);
7961 nfsm_chain_get_32(error, &nmrep, rflags);
7962 bmlen = NFS_ATTR_BITMAP_LEN;
7963 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7964 nfsm_chain_get_32(error, &nmrep, delegation);
7965 if (!error) {
7966 switch (delegation) {
7967 case NFS_OPEN_DELEGATE_NONE:
7968 break;
7969 case NFS_OPEN_DELEGATE_READ:
7970 case NFS_OPEN_DELEGATE_WRITE:
7971 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7972 nfsm_chain_get_32(error, &nmrep, recall);
7973 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
7974 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7975 }
7976 /* if we have any trouble accepting the ACE, just invalidate it */
7977 ace_type = ace_flags = ace_mask = len = 0;
7978 nfsm_chain_get_32(error, &nmrep, ace_type);
7979 nfsm_chain_get_32(error, &nmrep, ace_flags);
7980 nfsm_chain_get_32(error, &nmrep, ace_mask);
7981 nfsm_chain_get_32(error, &nmrep, len);
7982 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7983 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7984 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7985 if (!error && (len >= slen)) {
7986 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
7987 if (s) {
7988 slen = len + 1;
7989 } else {
7990 ace.ace_flags = 0;
7991 }
7992 }
7993 if (s) {
7994 nfsm_chain_get_opaque(error, &nmrep, len, s);
7995 } else {
7996 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7997 }
7998 if (!error && s) {
7999 s[len] = '\0';
8000 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
8001 ace.ace_flags = 0;
8002 }
8003 }
8004 if (error || !s) {
8005 ace.ace_flags = 0;
8006 }
8007 if (s && (s != sbuf)) {
8008 FREE(s, M_TEMP);
8009 }
8010 break;
8011 default:
8012 error = EBADRPC;
8013 break;
8014 }
8015 }
8016 /* At this point if we have no error, the object was created/opened. */
8017 open_error = error;
8018 } else {
8019 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8020 }
8021 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8022 nfsmout_if(error);
8023 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
8024 nfsmout_if(error);
8025 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
8026 error = EIO;
8027 goto nfsmout;
8028 }
8029 if (prefetch) {
8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
8031 }
8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8033 if (!hadattrdir) {
8034 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8035 }
8036 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8037 nfsmout_if(error);
8038 xid = savedxid;
8039 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8040 nfsmout_if(error);
8041
8042 if (open) {
8043 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8044 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8045 }
8046 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8047 if (adnp) {
8048 nfs_node_unlock(adnp);
8049 adlockerror = ENOENT;
8050 }
8051 NVATTR_CLEANUP(&nvattr);
8052 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
8053 nfsmout_if(error);
8054 savedxid = xid;
8055 if ((adlockerror = nfs_node_lock(adnp))) {
8056 error = adlockerror;
8057 }
8058 }
8059 }
8060
8061nfsmout:
8062 if (open && adnp && !adlockerror) {
8063 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8064 adnp->n_flag &= ~NNEGNCENTRIES;
8065 cache_purge_negatives(NFSTOV(adnp));
8066 }
8067 adnp->n_flag |= NMODIFIED;
8068 nfs_node_unlock(adnp);
8069 adlockerror = ENOENT;
8070 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8071 }
8072 if (adnp && !adlockerror && (error == ENOENT) &&
8073 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8074 /* add a negative entry in the name cache */
8075 cache_enter(NFSTOV(adnp), NULL, cnp);
8076 adnp->n_flag |= NNEGNCENTRIES;
8077 }
8078 if (adnp && !adlockerror) {
8079 nfs_node_unlock(adnp);
8080 adlockerror = ENOENT;
8081 }
8082 if (!error && !anp && fh.fh_len) {
8083 /* create the vnode with the filehandle and attributes */
8084 xid = savedxid;
8085 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
8086 if (!error) {
8087 *anpp = anp;
8088 nfs_node_unlock(anp);
8089 }
8090 if (!error && open) {
8091 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8092 /* After we have a node, add our open file struct to the node */
8093 nofp = newnofp;
8094 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8095 if (error) {
8096 /* This shouldn't happen, because we passed in a new nofp to use. */
8097 printf("nfs_open_file_find_internal failed! %d\n", error);
8098 nofp = NULL;
8099 } else if (nofp != newnofp) {
8100 /*
8101 * Hmm... an open file struct already exists.
8102 * Mark the existing one busy and merge our open into it.
8103 * Then destroy the one we created.
8104 * Note: there's no chance of an open conflict because the
8105 * open has already been granted.
8106 */
8107 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8108 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8109 nofp->nof_stateid = newnofp->nof_stateid;
8110 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8111 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8112 }
8113 nfs_open_file_clear_busy(newnofp);
8114 nfs_open_file_destroy(newnofp);
8115 newnofp = NULL;
8116 }
8117 if (!error) {
8118 newnofp = NULL;
8119 nofpbusyerror = 0;
8120 /* mark the node as holding a create-initiated open */
8121 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8122 nofp->nof_creator = current_thread();
8123 if (nofpp) {
8124 *nofpp = nofp;
8125 }
8126 }
8127 }
8128 }
8129 NVATTR_CLEANUP(&nvattr);
8130 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8131 if (!error && anp && !recall) {
8132 /* stuff the delegation state in the node */
8133 lck_mtx_lock(&anp->n_openlock);
8134 anp->n_openflags &= ~N_DELEG_MASK;
8135 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8136 anp->n_dstateid = dstateid;
8137 anp->n_dace = ace;
8138 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8139 lck_mtx_lock(&nmp->nm_lock);
8140 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8141 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8142 }
8143 lck_mtx_unlock(&nmp->nm_lock);
8144 }
8145 lck_mtx_unlock(&anp->n_openlock);
8146 } else {
8147 /* give the delegation back */
8148 if (anp) {
8149 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
8150 /* update delegation state and return it */
8151 lck_mtx_lock(&anp->n_openlock);
8152 anp->n_openflags &= ~N_DELEG_MASK;
8153 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8154 anp->n_dstateid = dstateid;
8155 anp->n_dace = ace;
8156 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8157 lck_mtx_lock(&nmp->nm_lock);
8158 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8159 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8160 }
8161 lck_mtx_unlock(&nmp->nm_lock);
8162 }
8163 lck_mtx_unlock(&anp->n_openlock);
8164 /* don't need to send a separate delegreturn for fh */
8165 fh.fh_len = 0;
8166 }
8167 /* return anp's current delegation */
8168 nfs4_delegation_return(anp, 0, thd, cred);
8169 }
8170 if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
8171 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
8172 }
8173 }
8174 }
8175 if (open) {
8176 if (newnofp) {
8177 /* need to cleanup our temporary nofp */
8178 nfs_open_file_clear_busy(newnofp);
8179 nfs_open_file_destroy(newnofp);
8180 newnofp = NULL;
8181 } else if (nofp && !nofpbusyerror) {
8182 nfs_open_file_clear_busy(nofp);
8183 nofpbusyerror = ENOENT;
8184 }
8185 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8186 inuse = 0;
8187 nofp = newnofp = NULL;
8188 rflags = delegation = recall = eof = rlen = retlen = 0;
8189 ace.ace_flags = 0;
8190 s = sbuf;
8191 slen = sizeof(sbuf);
8192 nfsm_chain_cleanup(&nmreq);
8193 nfsm_chain_cleanup(&nmrep);
8194 if (anp) {
8195 vnode_put(NFSTOV(anp));
8196 *anpp = anp = NULL;
8197 }
8198 hadattrdir = (adnp != NULL);
8199 if (noopbusy) {
8200 nfs_open_owner_clear_busy(noop);
8201 noopbusy = 0;
8202 }
8203 goto restart;
8204 }
8205 if (noop) {
8206 if (noopbusy) {
8207 nfs_open_owner_clear_busy(noop);
8208 noopbusy = 0;
8209 }
8210 nfs_open_owner_rele(noop);
8211 }
8212 }
8213 if (!error && prefetch && nmrep.nmc_mhead) {
8214 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8215 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8216 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8217 nfsm_chain_get_32(error, &nmrep, eof);
8218 nfsm_chain_get_32(error, &nmrep, retlen);
8219 if (!error && anp) {
8220 /*
8221 * There is one potential problem with doing the prefetch.
8222 * Because we don't have the node before we start the RPC, we
8223 * can't have the buffer busy while the READ is performed.
8224 * So there is a chance that other I/O occurred on the same
8225 * range of data while we were performing this RPC. If that
8226 * happens, then it's possible the data we have in the READ
8227 * response is no longer up to date.
8228 * Once we have the node and the buffer, we need to make sure
8229 * that there's no chance we could be putting stale data in
8230 * the buffer.
8231 * So, we check if the range read is dirty or if any I/O may
8232 * have occurred on it while we were performing our RPC.
8233 */
8234 struct nfsbuf *bp = NULL;
8235 int lastpg;
8236 uint32_t pagemask;
8237
8238 retlen = MIN(retlen, rlen);
8239
8240 /* check if node needs size update or invalidation */
8241 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8242 nfs_data_update_size(anp, 0);
8243 }
8244 if (!(error = nfs_node_lock(anp))) {
8245 if (anp->n_flag & NNEEDINVALIDATE) {
8246 anp->n_flag &= ~NNEEDINVALIDATE;
8247 nfs_node_unlock(anp);
8248 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8249 if (!error) { /* let's play it safe and just drop the data */
8250 error = EIO;
8251 }
8252 } else {
8253 nfs_node_unlock(anp);
8254 }
8255 }
8256
8257 /* calculate page mask for the range of data read */
8258 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
8259 pagemask = ((1 << (lastpg + 1)) - 1);
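			/*
			 * e.g. with 4K pages, retlen = 8192 yields lastpg = 1
			 * and pagemask = 0x3, covering buffer pages 0 and 1.
			 */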
8260
8261 if (!error) {
8262 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8263 }
8264 /* don't save the data if dirty or potential I/O conflict */
8265 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
8266 timevalcmp(&anp->n_lastio, &now, <)) {
8267 OSAddAtomic64(1, &nfsstats.read_bios);
8268 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8269 SET(bp->nb_flags, NB_READ);
8270 NFS_BUF_MAP(bp);
8271 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8272 if (error) {
8273 bp->nb_error = error;
8274 SET(bp->nb_flags, NB_ERROR);
8275 } else {
8276 bp->nb_offio = 0;
8277 bp->nb_endio = rlen;
8278 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8279 bp->nb_endio = retlen;
8280 }
8281 if (eof || (retlen == 0)) {
8282 /* zero out the remaining data (up to EOF) */
8283 off_t rpcrem, eofrem, rem;
8284 rpcrem = (rlen - retlen);
8285 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8286 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8287 if (rem > 0) {
8288 bzero(bp->nb_data + retlen, rem);
8289 }
8290 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8291 /* ugh... short read ... just invalidate for now... */
8292 SET(bp->nb_flags, NB_INVAL);
8293 }
8294 }
8295 nfs_buf_read_finish(bp);
8296 microuptime(&anp->n_lastio);
8297 }
8298 if (bp) {
8299 nfs_buf_release(bp, 1);
8300 }
8301 }
8302 error = 0; /* ignore any transient error in processing the prefetch */
8303 }
8304 if (adnp && !adbusyerror) {
8305 nfs_node_clear_busy(adnp);
8306 adbusyerror = ENOENT;
8307 }
8308 if (!busyerror) {
8309 nfs_node_clear_busy(np);
8310 busyerror = ENOENT;
8311 }
8312 if (adnp) {
8313 vnode_put(NFSTOV(adnp));
8314 }
8315 if (error && *anpp) {
8316 vnode_put(NFSTOV(*anpp));
8317 *anpp = NULL;
8318 }
8319 nfsm_chain_cleanup(&nmreq);
8320 nfsm_chain_cleanup(&nmrep);
8321 return error;
8322}
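
/*
 * A minimal usage sketch (hypothetical helper, not wired into any vnode op
 * table): look up a named attribute node by name without opening it, the
 * same way nfs4_vnop_getxattr() below drives nfs4_named_attr_get().
 */
static int __unused
nfs4_named_attr_lookup_example(nfsnode_t np, const char *name, vfs_context_t ctx, nfsnode_t *anpp)
{
	struct componentname cn;

	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;

	/* ACCESS_NONE: look the attribute up, but don't create or open it */
	return nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE, 0, ctx, anpp, NULL);
}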
8323
8324/*
8325 * Remove a named attribute.
8326 */
8327int
8328nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8329{
8330 nfsnode_t adnp = NULL;
8331 struct nfsmount *nmp;
8332 struct componentname cn;
8333 struct vnop_remove_args vra;
8334 int error, putanp = 0;
8335
8336 nmp = NFSTONMP(np);
8337 if (nfs_mount_gone(nmp)) {
8338 return ENXIO;
8339 }
8340
8341 bzero(&cn, sizeof(cn));
8342 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8343 cn.cn_namelen = strlen(name);
8344 cn.cn_nameiop = DELETE;
8345 cn.cn_flags = 0;
8346
8347 if (!anp) {
8348 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8349 0, ctx, &anp, NULL);
8350 if ((!error && !anp) || (error == ENOATTR)) {
8351 error = ENOENT;
8352 }
8353 if (error) {
8354 if (anp) {
8355 vnode_put(NFSTOV(anp));
8356 anp = NULL;
8357 }
8358 goto out;
8359 }
8360 putanp = 1;
8361 }
8362
8363 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8364 goto out;
8365 }
8366 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8367 nfs_node_clear_busy(np);
8368 if (!adnp) {
8369 error = ENOENT;
8370 goto out;
8371 }
8372
8373 vra.a_desc = &vnop_remove_desc;
8374 vra.a_dvp = NFSTOV(adnp);
8375 vra.a_vp = NFSTOV(anp);
8376 vra.a_cnp = &cn;
8377 vra.a_flags = 0;
8378 vra.a_context = ctx;
8379 error = nfs_vnop_remove(&vra);
8380out:
8381 if (adnp) {
8382 vnode_put(NFSTOV(adnp));
8383 }
8384 if (putanp) {
8385 vnode_put(NFSTOV(anp));
8386 }
8387 return error;
8388}
8389
8390int
8391nfs4_vnop_getxattr(
8392 struct vnop_getxattr_args /* {
8393 * struct vnodeop_desc *a_desc;
8394 * vnode_t a_vp;
8395 * const char * a_name;
8396 * uio_t a_uio;
8397 * size_t *a_size;
8398 * int a_options;
8399 * vfs_context_t a_context;
8400 * } */*ap)
8401{
8402 vfs_context_t ctx = ap->a_context;
8403 struct nfsmount *nmp;
8404 struct nfs_vattr nvattr;
8405 struct componentname cn;
8406 nfsnode_t anp;
8407 int error = 0, isrsrcfork;
8408
8409 nmp = VTONMP(ap->a_vp);
8410 if (nfs_mount_gone(nmp)) {
8411 return ENXIO;
8412 }
8413
8414 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8415 return ENOTSUP;
8416 }
8417 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8418 if (error) {
8419 return error;
8420 }
8421 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8422 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8423 return ENOATTR;
8424 }
8425
8426 bzero(&cn, sizeof(cn));
8427 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8428 cn.cn_namelen = strlen(ap->a_name);
8429 cn.cn_nameiop = LOOKUP;
8430 cn.cn_flags = MAKEENTRY;
8431
8432 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8433 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8434
8435 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8436 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8437 if ((!error && !anp) || (error == ENOENT)) {
8438 error = ENOATTR;
8439 }
8440 if (!error) {
8441 if (ap->a_uio) {
8442 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8443 } else {
8444 *ap->a_size = anp->n_size;
8445 }
8446 }
8447 if (anp) {
8448 vnode_put(NFSTOV(anp));
8449 }
8450 return error;
8451}
8452
8453int
8454nfs4_vnop_setxattr(
8455 struct vnop_setxattr_args /* {
8456 * struct vnodeop_desc *a_desc;
8457 * vnode_t a_vp;
8458 * const char * a_name;
8459 * uio_t a_uio;
8460 * int a_options;
8461 * vfs_context_t a_context;
8462 * } */*ap)
8463{
8464 vfs_context_t ctx = ap->a_context;
8465 int options = ap->a_options;
8466 uio_t uio = ap->a_uio;
8467 const char *name = ap->a_name;
8468 struct nfsmount *nmp;
8469 struct componentname cn;
8470 nfsnode_t anp = NULL;
8471 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8472#define FINDERINFOSIZE 32
8473 uint8_t finfo[FINDERINFOSIZE];
8474 uint32_t *finfop;
8475 struct nfs_open_file *nofp = NULL;
8476 char uio_buf[UIO_SIZEOF(1)];
8477 uio_t auio;
8478 struct vnop_write_args vwa;
8479
8480 nmp = VTONMP(ap->a_vp);
8481 if (nfs_mount_gone(nmp)) {
8482 return ENXIO;
8483 }
8484
8485 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8486 return ENOTSUP;
8487 }
8488
8489 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8490 return EINVAL;
8491 }
8492
8493 /* XXX limitation based on need to back up uio on short write */
8494 if (uio_iovcnt(uio) > 1) {
8495 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
8496 return EINVAL;
8497 }
8498
8499 bzero(&cn, sizeof(cn));
8500 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8501 cn.cn_namelen = strlen(name);
8502 cn.cn_nameiop = CREATE;
8503 cn.cn_flags = MAKEENTRY;
8504
8505 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
8506 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8507 if (!isrsrcfork) {
8508 uio_setoffset(uio, 0);
8509 }
8510 if (isfinderinfo) {
8511 if (uio_resid(uio) != sizeof(finfo)) {
8512 return ERANGE;
8513 }
8514 error = uiomove((char*)&finfo, sizeof(finfo), uio);
8515 if (error) {
8516 return error;
8517 }
8518 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8519 empty = 1;
8520 for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
8521 if (finfop[i]) {
8522 empty = 0;
8523 break;
8524 }
8525 }
8526 if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
8527 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
8528 if (error == ENOENT) {
8529 error = 0;
8530 }
8531 return error;
8532 }
8533 /* first, let's see if we get a create/replace error */
8534 }
8535
8536 /*
8537 * create/open the xattr
8538 *
8539 * We need to make sure not to create it if XATTR_REPLACE.
8540 * For all xattrs except the resource fork, we also want to
8541 * truncate the xattr to remove any current data. We'll do
8542 * that by setting the size to 0 on create/open.
8543 */
8544 flags = 0;
8545 if (!(options & XATTR_REPLACE)) {
8546 flags |= NFS_GET_NAMED_ATTR_CREATE;
8547 }
8548 if (options & XATTR_CREATE) {
8549 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
8550 }
8551 if (!isrsrcfork) {
8552 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
8553 }
8554
8555 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8556 flags, ctx, &anp, &nofp);
8557 if (!error && !anp) {
8558 error = ENOATTR;
8559 }
8560 if (error) {
8561 goto out;
8562 }
8563 /* grab the open state from the get/create/open */
8564 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
8565 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
8566 nofp->nof_creator = NULL;
8567 nfs_open_file_clear_busy(nofp);
8568 }
8569
8570 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
8571 if (isfinderinfo && empty) {
8572 goto doclose;
8573 }
8574
8575 /*
8576 * Write the data out and flush.
8577 *
8578 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8579 */
8580 vwa.a_desc = &vnop_write_desc;
8581 vwa.a_vp = NFSTOV(anp);
8582 vwa.a_uio = NULL;
8583 vwa.a_ioflag = 0;
8584 vwa.a_context = ctx;
8585 if (isfinderinfo) {
8586 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
8587 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
8588 vwa.a_uio = auio;
8589 } else if (uio_resid(uio) > 0) {
8590 vwa.a_uio = uio;
8591 }
8592 if (vwa.a_uio) {
8593 error = nfs_vnop_write(&vwa);
8594 if (!error) {
8595 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
8596 }
8597 }
8598doclose:
8599 /* Close the xattr. */
8600 if (nofp) {
8601 int busyerror = nfs_open_file_set_busy(nofp, NULL);
8602 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
8603 if (!busyerror) {
8604 nfs_open_file_clear_busy(nofp);
8605 }
8606 }
8607 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
8608 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
8609 if (error == ENOENT) {
8610 error = 0;
8611 }
8612 }
8613 if (!error) {
8614 error = closeerror;
8615 }
8616out:
8617 if (anp) {
8618 vnode_put(NFSTOV(anp));
8619 }
8620 if (error == ENOENT) {
8621 error = ENOATTR;
8622 }
8623 return error;
8624}
8625
8626int
8627nfs4_vnop_removexattr(
8628 struct vnop_removexattr_args /* {
8629 * struct vnodeop_desc *a_desc;
8630 * vnode_t a_vp;
8631 * const char * a_name;
8632 * int a_options;
8633 * vfs_context_t a_context;
8634 * } */*ap)
8635{
8636 struct nfsmount *nmp = VTONMP(ap->a_vp);
8637 int error;
8638
8639 if (nfs_mount_gone(nmp)) {
8640 return ENXIO;
8641 }
8642 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8643 return ENOTSUP;
8644 }
8645
8646 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
8647 if (error == ENOENT) {
8648 error = ENOATTR;
8649 }
8650 return error;
8651}
8652
8653int
8654nfs4_vnop_listxattr(
8655 struct vnop_listxattr_args /* {
8656 * struct vnodeop_desc *a_desc;
8657 * vnode_t a_vp;
8658 * uio_t a_uio;
8659 * size_t *a_size;
8660 * int a_options;
8661 * vfs_context_t a_context;
8662 * } */*ap)
8663{
8664 vfs_context_t ctx = ap->a_context;
8665 nfsnode_t np = VTONFS(ap->a_vp);
8666 uio_t uio = ap->a_uio;
8667 nfsnode_t adnp = NULL;
8668 struct nfsmount *nmp;
8669 int error, done, i;
8670 struct nfs_vattr nvattr;
8671 uint64_t cookie, nextcookie, lbn = 0;
8672 struct nfsbuf *bp = NULL;
8673 struct nfs_dir_buf_header *ndbhp;
8674 struct direntry *dp;
8675
8676 nmp = VTONMP(ap->a_vp);
8677 if (nfs_mount_gone(nmp)) {
8678 return ENXIO;
8679 }
8680
8681 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8682 return ENOTSUP;
8683 }
8684
8685 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8686 if (error) {
8687 return error;
8688 }
8689 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8690 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8691 return 0;
8692 }
8693
8694 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8695 return error;
8696 }
8697 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8698 nfs_node_clear_busy(np);
8699 if (!adnp) {
8700 goto out;
8701 }
8702
8703 if ((error = nfs_node_lock(adnp))) {
8704 goto out;
8705 }
8706
8707 if (adnp->n_flag & NNEEDINVALIDATE) {
8708 adnp->n_flag &= ~NNEEDINVALIDATE;
8709 nfs_invaldir(adnp);
8710 nfs_node_unlock(adnp);
8711 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8712 if (!error) {
8713 error = nfs_node_lock(adnp);
8714 }
8715 if (error) {
8716 goto out;
8717 }
8718 }
8719
8720 /*
8721 * check for need to invalidate when (re)starting at beginning
8722 */
8723 if (adnp->n_flag & NMODIFIED) {
8724 nfs_invaldir(adnp);
8725 nfs_node_unlock(adnp);
8726 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
8727 goto out;
8728 }
8729 } else {
8730 nfs_node_unlock(adnp);
8731 }
8732 /* nfs_getattr() will check changed and purge caches */
8733 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
8734 goto out;
8735 }
8736
8737 if (uio && (uio_resid(uio) == 0)) {
8738 goto out;
8739 }
8740
8741 done = 0;
8742 nextcookie = lbn = 0;
8743
8744 while (!error && !done) {
8745 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8746 cookie = nextcookie;
8747getbuffer:
8748 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8749 if (error) {
8750 goto out;
8751 }
8752 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8753 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8754 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8755 ndbhp->ndbh_flags = 0;
8756 ndbhp->ndbh_count = 0;
8757 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8758 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8759 }
8760 error = nfs_buf_readdir(bp, ctx);
8761 if (error == NFSERR_DIRBUFDROPPED) {
8762 goto getbuffer;
8763 }
8764 if (error) {
8765 nfs_buf_release(bp, 1);
8766 }
8767 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8768 if (!nfs_node_lock(adnp)) {
8769 nfs_invaldir(adnp);
8770 nfs_node_unlock(adnp);
8771 }
8772 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8773 if (error == NFSERR_BAD_COOKIE) {
8774 error = ENOENT;
8775 }
8776 }
8777 if (error) {
8778 goto out;
8779 }
8780 }
8781
8782 /* go through all the entries copying/counting */
8783 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
8784 for (i = 0; i < ndbhp->ndbh_count; i++) {
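			/*
			 * Skip names the system protects from view.  A NULL uio
			 * means a sizing pass: just total the name lengths
			 * (including the NULs).  Otherwise copy each name plus
			 * its NUL; a short buffer, or any copy error other than
			 * EFAULT, is reported as ERANGE.
			 */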
8785 if (!xattr_protected(dp->d_name)) {
8786 if (uio == NULL) {
8787 *ap->a_size += dp->d_namlen + 1;
8788 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8789 error = ERANGE;
8790 } else {
8791 error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
8792 if (error && (error != EFAULT)) {
8793 error = ERANGE;
8794 }
8795 }
8796 }
8797 nextcookie = dp->d_seekoff;
8798 dp = NFS_DIRENTRY_NEXT(dp);
8799 }
8800
8801 if (i == ndbhp->ndbh_count) {
8802 /* hit end of buffer, move to next buffer */
8803 lbn = nextcookie;
8804 /* if we also hit EOF, we're done */
8805 if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
8806 done = 1;
8807 }
8808 }
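		/* a server that never advances the cookie would loop us forever */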
8809 if (!error && !done && (nextcookie == cookie)) {
8810 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8811 error = EIO;
8812 }
8813 nfs_buf_release(bp, 1);
8814 }
8815out:
8816 if (adnp) {
8817 vnode_put(NFSTOV(adnp));
8818 }
8819 return error;
8820}
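
/*
 * Illustrative example (not part of this file): the vnop above backs
 * listxattr(2), so a userland caller would typically use the two-pass
 * sizing pattern it implements.  The path below is hypothetical:
 *
 *	#include <sys/xattr.h>
 *	#include <stdlib.h>
 *
 *	ssize_t len = listxattr("/Volumes/nfs/file", NULL, 0, 0);
 *	if (len > 0) {
 *		char *names = malloc(len);
 *		len = listxattr("/Volumes/nfs/file", names, len, 0);
 *		// "names" now holds the NUL-separated attribute names
 *	}
 */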
8821
8822#if NAMEDSTREAMS
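/*
 * NFSv4 getnamedstream: named streams (e.g. the resource fork, reachable
 * from userland via the "..namedfork" path convention) are backed by
 * NFSv4 named attributes.  Look up the attribute without requesting any
 * open access and return its vnode.
 */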
8823int
8824nfs4_vnop_getnamedstream(
8825 struct vnop_getnamedstream_args /* {
8826 * struct vnodeop_desc *a_desc;
8827 * vnode_t a_vp;
8828 * vnode_t *a_svpp;
8829 * const char *a_name;
8830 * enum nsoperation a_operation;
8831 * int a_flags;
8832 * vfs_context_t a_context;
8833 * } */*ap)
8834{
8835 vfs_context_t ctx = ap->a_context;
8836 struct nfsmount *nmp;
8837 struct nfs_vattr nvattr;
8838 struct componentname cn;
8839 nfsnode_t anp;
8840 int error = 0;
8841
8842 nmp = VTONMP(ap->a_vp);
8843 if (nfs_mount_gone(nmp)) {
8844 return ENXIO;
8845 }
8846
8847 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8848 return ENOTSUP;
8849 }
8850 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8851 if (error) {
8852 return error;
8853 }
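	/* no named attributes on the file means the stream cannot exist */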
8854 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8855 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8856 return ENOATTR;
8857 }
8858
8859 bzero(&cn, sizeof(cn));
8860 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8861 cn.cn_namelen = strlen(ap->a_name);
8862 cn.cn_nameiop = LOOKUP;
8863 cn.cn_flags = MAKEENTRY;
8864
8865 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8866 0, ctx, &anp, NULL);
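	/* normalize "not found" to ENOATTR, which is what stream callers expect */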
8867 if ((!error && !anp) || (error == ENOENT)) {
8868 error = ENOATTR;
8869 }
8870 if (!error && anp) {
8871 *ap->a_svpp = NFSTOV(anp);
8872 } else if (anp) {
8873 vnode_put(NFSTOV(anp));
8874 }
8875 return error;
8876}
8877
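/*
 * NFSv4 makenamedstream: create the backing named attribute, opened for
 * both read and write access, and return its vnode as the new stream.
 */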
8878int
8879nfs4_vnop_makenamedstream(
8880 struct vnop_makenamedstream_args /* {
8881 * struct vnodeop_desc *a_desc;
8882 * vnode_t *a_svpp;
8883 * vnode_t a_vp;
8884 * const char *a_name;
8885 * int a_flags;
8886 * vfs_context_t a_context;
8887 * } */*ap)
8888{
8889 vfs_context_t ctx = ap->a_context;
8890 struct nfsmount *nmp;
8891 struct componentname cn;
8892 nfsnode_t anp;
8893 int error = 0;
8894
8895 nmp = VTONMP(ap->a_vp);
8896 if (nfs_mount_gone(nmp)) {
8897 return ENXIO;
8898 }
8899
8900 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8901 return ENOTSUP;
8902 }
8903
8904 bzero(&cn, sizeof(cn));
8905 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8906 cn.cn_namelen = strlen(ap->a_name);
8907 cn.cn_nameiop = CREATE;
8908 cn.cn_flags = MAKEENTRY;
8909
8910 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8911 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8912 if ((!error && !anp) || (error == ENOENT)) {
8913 error = ENOATTR;
8914 }
8915 if (!error && anp) {
8916 *ap->a_svpp = NFSTOV(anp);
8917 } else if (anp) {
8918 vnode_put(NFSTOV(anp));
8919 }
8920 return error;
8921}
8922
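/*
 * NFSv4 removenamedstream: delete the named attribute backing the stream.
 */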
8923int
8924nfs4_vnop_removenamedstream(
8925 struct vnop_removenamedstream_args /* {
8926 * struct vnodeop_desc *a_desc;
8927 * vnode_t a_vp;
8928 * vnode_t a_svp;
8929 * const char *a_name;
8930 * int a_flags;
8931 * vfs_context_t a_context;
8932 * } */*ap)
8933{
8934 struct nfsmount *nmp = VTONMP(ap->a_vp);
8935 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8936 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8937
8938 if (nfs_mount_gone(nmp)) {
8939 return ENXIO;
8940 }
8941
8942	/*
8943	 * Given that a_svp is already a named stream, the server must support
8944	 * named attributes, so this check is largely redundant but harmless.
8945	 */
8946 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8947 return ENOTSUP;
8948 }
8949
8950 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
8951}
8952
8953#endif /* NAMEDSTREAMS */
8954#endif /* CONFIG_NFS4 */