/*
 * Copyright (c) 2006-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

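/*
 * Check access rights via an NFSv4 ACCESS RPC (PUTFH, ACCESS, GETATTR)
 * and cache the result in the node's per-uid access cache.
 */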
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (0);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		&si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE);
		else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD))
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		if (dvp != NULLVP)
			vnode_put(dvp);
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE))
		access_result |= NFS_ACCESS_DELETE;
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

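/*
 * Fetch attributes for a file handle via a PUTFH, GETATTR compound.
 * Vnode monitor (NGA_MONITOR) and soft (NGA_SOFT) requests are flagged
 * so they fail rather than hang on an unresponsive server.
 */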
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return (0);
	}

	if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;

	if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls)
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

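/*
 * Read a symbolic link's target via a PUTFH, GETATTR, READLINK compound,
 * truncating the result to fit the caller's buffer.
 */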
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp))
			len = np->n_size;
		else
			len = *buflenp - 1;
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error)
		*buflenp = len;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

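/*
 * Start an asynchronous NFSv4 READ RPC (PUTFH, READ, GETATTR) for the
 * given range, using the open/lock stateid appropriate for this node.
 */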
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

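/*
 * Finish an asynchronous READ RPC: parse the reply, copy the data into
 * the caller's uio, and report EOF.
 */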
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	if (eofp) {
		if (!eof && !retlen)
			eof = 1;
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

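/*
 * Start an asynchronous NFSv4 WRITE RPC (PUTFH, WRITE, GETATTR),
 * downgrading to an unstable write on MNT_ASYNC mounts.
 */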
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		iomode = NFS_WRITE_UNSTABLE;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error)
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

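/*
 * Finish an asynchronous WRITE RPC: parse the reply, note the commit
 * level and write verifier, and update the mount's verifier if needed.
 */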
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		error = ENXIO;
	if (!error && (lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0)
		error = NFSERR_IO;
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp)
		*wverfp = wverf;
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		committed = NFS_WRITE_FILESYNC;
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

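/*
 * Remove a directory entry via a PUTFH, REMOVE, GETATTR compound,
 * retrying if the server is in its grace period.
 */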
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(dnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
		goto restart;
	}

	return (remove_error);
}

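/*
 * Rename an entry with a single compound that renames and refreshes the
 * attributes of both the source and target directories.
 */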
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(tdnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(fdnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return (error);
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp)))
		return (lockerror);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i=0; i < ndbhp->ndbh_count-1; i++)
			dp = NFS_DIRENTRY_NEXT(dp);
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh.fh_len = 0;
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen+1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen+1);
		if (dnp->n_parent)
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		else
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {

		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus)
			microuptime(&now);

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name)-1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen)
				nfsm_chain_adv(error, &nmrep,
					nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
				dp->d_fileno = nvattrp->nva_fileid;
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen+1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen+1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0)
				bzero(padstart, padlen);
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp)
		nfs_buf_release(bp, 0);
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
}

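/*
 * Start an asynchronous lookup, using LOOKUPP for ".." so the server
 * walks to the parent directory instead of looking up the name.
 */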
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's')))
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

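/*
 * Finish an asynchronous lookup: load the directory attributes, pull out
 * the new file handle and attributes, and fetch SECINFO to set the mount's
 * default security flavor on the first successful LOOKUP if still needed.
 */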
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
		isdotdot = 1;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp)
		*xidp = xid;
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL)
			error = 0;
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count)
				nmp->nm_auth = sec.flavors[0];
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return (error);
}

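/*
 * Commit previously written data to stable storage via a PUTFH, COMMIT,
 * GETATTR compound; return NFSERR_STALEWRITEVERF if the write verifier
 * changed (indicating the writes must be resent).
 */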
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
		return (0);
	nfsvers = nmp->nm_vers;

	if (count > UINT32_MAX)
		count32 = 0;
	else
		count32 = count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf)
		nmp->nm_verf = newwverf;
	if (wverf != newwverf)
		error = NFSERR_STALEWRITEVERF;
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

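/*
 * NFSv4 has no PATHCONF RPC, so fetch the equivalent "pathconf"
 * attributes (maxlink, maxname, case handling, etc.) with a GETATTR.
 */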
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	if (!error)
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	if (!lockerror)
		nfs_node_unlock(np);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

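/*
 * NFSv4 getattr vnode op: fetch (possibly cached) attributes for the
 * vnode and translate them into the caller's struct vnode_attr.
 */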
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls)
		ngaflags |= NGA_ACL;
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error)
		return (error);

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
			VATTR_RETURN(vap, va_mode, 0777);
		else
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	     NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	     (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED))
			flags |= SF_ARCHIVED;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN))
			flags |= UF_HIDDEN;
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
		VATTR_RETURN(vap, va_type, nva.nva_type);
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
		VATTR_RETURN(vap, va_filerev, nva.nva_change);

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return (error);
}

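/*
 * Set attributes via a PUTFH, SETATTR, GETATTR compound.  The trailing
 * GETATTR refreshes the attribute cache (including a cached ACL, if any);
 * if the server rejects an ACL/mode combination with EINVAL, retry with
 * just the ACL.
 */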
1479 int
1480 nfs4_setattr_rpc(
1481 nfsnode_t np,
1482 struct vnode_attr *vap,
1483 vfs_context_t ctx)
1484 {
1485 struct nfsmount *nmp = NFSTONMP(np);
1486 int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
1487 u_int64_t xid, nextxid;
1488 struct nfsm_chain nmreq, nmrep;
1489 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
1490 uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
1491 uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
1492 nfs_stateid stateid;
1493 struct nfsreq_secinfo_args si;
1494
1495 if (nfs_mount_gone(nmp))
1496 return (ENXIO);
1497 nfsvers = nmp->nm_vers;
1498 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
1499 return (EINVAL);
1500
1501 if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
1502 /* we don't support setting unsupported flags (duh!) */
1503 if (vap->va_active & ~VNODE_ATTR_va_flags)
1504 return (EINVAL); /* return EINVAL if other attributes also set */
1505 else
1506 return (ENOTSUP); /* return ENOTSUP for chflags(2) */
1507 }
1508
1509 /* don't bother requesting some changes if they don't look like they are changing */
1510 if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid))
1511 VATTR_CLEAR_ACTIVE(vap, va_uid);
1512 if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid))
1513 VATTR_CLEAR_ACTIVE(vap, va_gid);
1514 if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid))
1515 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1516 if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid))
1517 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1518
1519 tryagain:
1520 /* do nothing if no attributes will be sent */
1521 nfs_vattr_set_bitmap(nmp, bitmap, vap);
1522 if (!bitmap[0] && !bitmap[1])
1523 return (0);
1524
1525 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
1526 nfsm_chain_null(&nmreq);
1527 nfsm_chain_null(&nmrep);
1528
1529 /*
1530 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
1531 * need to invalidate any cached ACL. And if we had an ACL cached,
1532 * we might as well also fetch the new value.
1533 */
1534 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
1535 if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
1536 NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
1537 if (NACLVALID(np))
1538 NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
1539 NACLINVALIDATE(np);
1540 }
1541
1542 // PUTFH, SETATTR, GETATTR
1543 numops = 3;
1544 nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
1545 nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
1546 numops--;
1547 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
1548 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
1549 numops--;
1550 nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
1551 if (VATTR_IS_ACTIVE(vap, va_data_size))
1552 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1553 else
1554 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1555 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1556 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1557 numops--;
1558 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1559 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1560 nfsm_chain_build_done(error, &nmreq);
1561 nfsm_assert(error, (numops == 0), EPROTO);
1562 nfsmout_if(error);
1563 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1564
1565 if ((lockerror = nfs_node_lock(np)))
1566 error = lockerror;
1567 nfsm_chain_skip_tag(error, &nmrep);
1568 nfsm_chain_get_32(error, &nmrep, numops);
1569 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1570 nfsmout_if(error);
1571 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1572 nfsmout_if(error == EBADRPC);
1573 setattr_error = error;
1574 error = 0;
1575 bmlen = NFS_ATTR_BITMAP_LEN;
1576 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1577 if (!error) {
1578 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
1579 microuptime(&np->n_lastio);
1580 nfs_vattr_set_supported(setbitmap, vap);
1581 error = setattr_error;
1582 }
1583 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1584 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1585 if (error)
1586 NATTRINVALIDATE(np);
1587 /*
1588 * We just changed the attributes and we want to make sure that we
1589 * see the latest attributes. Get the next XID. If it's not the
1590 * next XID after the SETATTR XID, then it's possible that another
1591 * RPC was in flight at the same time and it might put stale attributes
1592 * in the cache. In that case, we invalidate the attributes and set
1593 * the attribute cache XID to guarantee that newer attributes will
1594 * get loaded next.
1595 */
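/*
 * Illustrative example: if the SETATTR went out with XID 100 but
 * nfs_get_xid() now returns 102, some other RPC claimed XID 101 while
 * we were working, and its reply may carry pre-SETATTR attributes.
 * Bumping n_xid to 102 makes any attributes tagged with an older XID
 * fail the freshness check and get discarded.
 */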
1596 nextxid = 0;
1597 nfs_get_xid(&nextxid);
1598 if (nextxid != (xid + 1)) {
1599 np->n_xid = nextxid;
1600 NATTRINVALIDATE(np);
1601 }
1602 nfsmout:
1603 if (!lockerror)
1604 nfs_node_unlock(np);
1605 nfsm_chain_cleanup(&nmreq);
1606 nfsm_chain_cleanup(&nmrep);
1607 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1608 /*
1609 * Some servers may not like the ACL/mode combos that get sent.
1610 * If it looks like that's what the server choked on, try setting
1611 * just the ACL and not the mode (unless it looks like everything
1612 * but mode was already successfully set).
1613 */
1614 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1615 ((bitmap[1] & (setbitmap[1]|NFS_FATTR_MODE)) != bitmap[1])) {
1616 VATTR_CLEAR_ACTIVE(vap, va_mode);
1617 error = 0;
1618 goto tryagain;
1619 }
1620 }
1621 return (error);
1622 }
1623
1624 /*
1625 * Wait for any pending recovery to complete.
1626 */
1627 int
1628 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1629 {
1630 struct timespec ts = { 1, 0 };
1631 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1632
1633 lck_mtx_lock(&nmp->nm_lock);
1634 while (nmp->nm_state & NFSSTA_RECOVER) {
1635 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1636 break;
1637 nfs_mount_sock_thread_wake(nmp);
1638 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1639 slpflag = 0;
1640 }
1641 lck_mtx_unlock(&nmp->nm_lock);
1642
1643 return (error);
1644 }
1645
1646 /*
1647 * We're about to use/manipulate NFS mount's open/lock state.
1648 * Wait for any pending state recovery to complete, then
1649 * mark the state as being in use (which will hold off
1650 * the recovery thread until we're done).
1651 */
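/*
 * A minimal sketch of the expected caller pattern (see nfs_vnop_mmap
 * below for a real instance):
 *
 *	restart:
 *		if ((error = nfs_mount_state_in_use_start(nmp, thd)))
 *			return (error);
 *		... perform open/lock state operations ...
 *		if (nfs_mount_state_in_use_end(nmp, error))
 *			goto restart;
 */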
1652 int
1653 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1654 {
1655 struct timespec ts = { 1, 0 };
1656 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1657
1658 if (nfs_mount_gone(nmp))
1659 return (ENXIO);
1660 lck_mtx_lock(&nmp->nm_lock);
1661 if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
1662 lck_mtx_unlock(&nmp->nm_lock);
1663 return (ENXIO);
1664 }
1665 while (nmp->nm_state & NFSSTA_RECOVER) {
1666 if ((error = nfs_sigintr(nmp, NULL, thd, 1)))
1667 break;
1668 nfs_mount_sock_thread_wake(nmp);
1669 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1670 slpflag = 0;
1671 }
1672 if (!error)
1673 nmp->nm_stateinuse++;
1674 lck_mtx_unlock(&nmp->nm_lock);
1675
1676 return (error);
1677 }
1678
1679 /*
1680 * We're done using/manipulating the NFS mount's open/lock
1681 * state. If the given error indicates that recovery should
1682 * be performed, we'll initiate recovery.
1683 */
1684 int
1685 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1686 {
1687 int restart = nfs_mount_state_error_should_restart(error);
1688
1689 if (nfs_mount_gone(nmp))
1690 return (restart);
1691 lck_mtx_lock(&nmp->nm_lock);
1692 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1693 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1694 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1695 nfs_need_recover(nmp, error);
1696 }
1697 if (nmp->nm_stateinuse > 0)
1698 nmp->nm_stateinuse--;
1699 else
1700 panic("NFS mount state in use count underrun");
1701 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
1702 wakeup(&nmp->nm_stateinuse);
1703 lck_mtx_unlock(&nmp->nm_lock);
1704 if (error == NFSERR_GRACE)
1705 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
1706
1707 return (restart);
1708 }
1709
1710 /*
1711 * Does the error mean we should restart/redo a state-related operation?
1712 */
1713 int
1714 nfs_mount_state_error_should_restart(int error)
1715 {
1716 switch (error) {
1717 case NFSERR_STALE_STATEID:
1718 case NFSERR_STALE_CLIENTID:
1719 case NFSERR_ADMIN_REVOKED:
1720 case NFSERR_EXPIRED:
1721 case NFSERR_OLD_STATEID:
1722 case NFSERR_BAD_STATEID:
1723 case NFSERR_GRACE:
1724 return (1);
1725 }
1726 return (0);
1727 }
1728
1729 /*
1730 * In some cases we may want to limit how many times we restart a
1731 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1732 * Base the limit on the lease (as long as it's not too short).
1733 */
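/*
 * Note: nfsa_lease is the server's lease time in seconds, so this
 * effectively allows roughly one restart per second of lease time,
 * with a floor of 60 restarts.
 */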
1734 uint
1735 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1736 {
1737 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1738 }
1739
1740 /*
1741 * Does the error mean we probably lost a delegation?
1742 */
1743 int
1744 nfs_mount_state_error_delegation_lost(int error)
1745 {
1746 switch (error) {
1747 case NFSERR_STALE_STATEID:
1748 case NFSERR_ADMIN_REVOKED:
1749 case NFSERR_EXPIRED:
1750 case NFSERR_OLD_STATEID:
1751 case NFSERR_BAD_STATEID:
1752 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1753 return (1);
1754 }
1755 return (0);
1756 }
1757
1758
1759 /*
1760 * Mark an NFS node's open state as busy.
1761 */
1762 int
1763 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1764 {
1765 struct nfsmount *nmp;
1766 struct timespec ts = {2, 0};
1767 int error = 0, slpflag;
1768
1769 nmp = NFSTONMP(np);
1770 if (nfs_mount_gone(nmp))
1771 return (ENXIO);
1772 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1773
1774 lck_mtx_lock(&np->n_openlock);
1775 while (np->n_openflags & N_OPENBUSY) {
1776 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1777 break;
1778 np->n_openflags |= N_OPENWANT;
1779 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1780 slpflag = 0;
1781 }
1782 if (!error)
1783 np->n_openflags |= N_OPENBUSY;
1784 lck_mtx_unlock(&np->n_openlock);
1785
1786 return (error);
1787 }
1788
1789 /*
1790 * Clear an NFS node's open state busy flag and wake up
1791 * anyone wanting it.
1792 */
1793 void
1794 nfs_open_state_clear_busy(nfsnode_t np)
1795 {
1796 int wanted;
1797
1798 lck_mtx_lock(&np->n_openlock);
1799 if (!(np->n_openflags & N_OPENBUSY))
1800 panic("nfs_open_state_clear_busy");
1801 wanted = (np->n_openflags & N_OPENWANT);
1802 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1803 lck_mtx_unlock(&np->n_openlock);
1804 if (wanted)
1805 wakeup(&np->n_openflags);
1806 }
1807
1808 /*
1809 * Search a mount's open owner list for the owner for this credential.
1810 * If not found and "alloc" is set, then allocate a new one.
1811 */
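/*
 * Note: this uses an optimistic allocate-and-retry pattern. The mount
 * lock is dropped around the blocking MALLOC, then the list is searched
 * again from "tryagain"; if another thread linked in an owner for the
 * same uid in the meantime, that one wins and the speculative
 * allocation is destroyed before returning.
 */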
1812 struct nfs_open_owner *
1813 nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1814 {
1815 uid_t uid = kauth_cred_getuid(cred);
1816 struct nfs_open_owner *noop, *newnoop = NULL;
1817
1818 tryagain:
1819 lck_mtx_lock(&nmp->nm_lock);
1820 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1821 if (kauth_cred_getuid(noop->noo_cred) == uid)
1822 break;
1823 }
1824
1825 if (!noop && !newnoop && alloc) {
1826 lck_mtx_unlock(&nmp->nm_lock);
1827 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1828 if (!newnoop)
1829 return (NULL);
1830 bzero(newnoop, sizeof(*newnoop));
1831 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1832 newnoop->noo_mount = nmp;
1833 kauth_cred_ref(cred);
1834 newnoop->noo_cred = cred;
1835 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1836 TAILQ_INIT(&newnoop->noo_opens);
1837 goto tryagain;
1838 }
1839 if (!noop && newnoop) {
1840 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1841 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1842 noop = newnoop;
1843 }
1844 lck_mtx_unlock(&nmp->nm_lock);
1845
1846 if (newnoop && (noop != newnoop))
1847 nfs_open_owner_destroy(newnoop);
1848
1849 if (noop)
1850 nfs_open_owner_ref(noop);
1851
1852 return (noop);
1853 }
1854
1855 /*
1856 * destroy an open owner that's no longer needed
1857 */
1858 void
1859 nfs_open_owner_destroy(struct nfs_open_owner *noop)
1860 {
1861 if (noop->noo_cred)
1862 kauth_cred_unref(&noop->noo_cred);
1863 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1864 FREE(noop, M_TEMP);
1865 }
1866
1867 /*
1868 * acquire a reference count on an open owner
1869 */
1870 void
1871 nfs_open_owner_ref(struct nfs_open_owner *noop)
1872 {
1873 lck_mtx_lock(&noop->noo_lock);
1874 noop->noo_refcnt++;
1875 lck_mtx_unlock(&noop->noo_lock);
1876 }
1877
1878 /*
1879 * drop a reference count on an open owner and destroy it if
1880 * it is no longer referenced and no longer on the mount's list.
1881 */
1882 void
1883 nfs_open_owner_rele(struct nfs_open_owner *noop)
1884 {
1885 lck_mtx_lock(&noop->noo_lock);
1886 if (noop->noo_refcnt < 1)
1887 panic("nfs_open_owner_rele: no refcnt");
1888 noop->noo_refcnt--;
1889 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1890 panic("nfs_open_owner_rele: busy");
1891 /* XXX we may potentially want to clean up idle/unused open owner structures */
1892 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1893 lck_mtx_unlock(&noop->noo_lock);
1894 return;
1895 }
1896 /* owner is no longer referenced or linked to mount, so destroy it */
1897 lck_mtx_unlock(&noop->noo_lock);
1898 nfs_open_owner_destroy(noop);
1899 }
1900
1901 /*
1902 * Mark an open owner as busy because we are about to
1903 * start an operation that uses and updates open owner state.
1904 */
1905 int
1906 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1907 {
1908 struct nfsmount *nmp;
1909 struct timespec ts = {2, 0};
1910 int error = 0, slpflag;
1911
1912 nmp = noop->noo_mount;
1913 if (nfs_mount_gone(nmp))
1914 return (ENXIO);
1915 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1916
1917 lck_mtx_lock(&noop->noo_lock);
1918 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1919 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1920 break;
1921 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1922 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
1923 slpflag = 0;
1924 }
1925 if (!error)
1926 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1927 lck_mtx_unlock(&noop->noo_lock);
1928
1929 return (error);
1930 }
1931
1932 /*
1933 * Clear the busy flag on an open owner and wake up anyone waiting
1934 * to mark it busy.
1935 */
1936 void
1937 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1938 {
1939 int wanted;
1940
1941 lck_mtx_lock(&noop->noo_lock);
1942 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1943 panic("nfs_open_owner_clear_busy");
1944 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1945 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1946 lck_mtx_unlock(&noop->noo_lock);
1947 if (wanted)
1948 wakeup(noop);
1949 }
1950
1951 /*
1952 * Given an open/lock owner and an error code, increment the
1953 * sequence ID if appropriate.
1954 */
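/*
 * This mirrors the RFC 3530 rule that an owner's seqid advances on
 * every operation that uses it, including most failures; the errors
 * listed below are (roughly) the ones for which the server is not
 * expected to have bumped its copy of the seqid.
 */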
1955 void
1956 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1957 {
1958 switch (error) {
1959 case NFSERR_STALE_CLIENTID:
1960 case NFSERR_STALE_STATEID:
1961 case NFSERR_OLD_STATEID:
1962 case NFSERR_BAD_STATEID:
1963 case NFSERR_BAD_SEQID:
1964 case NFSERR_BADXDR:
1965 case NFSERR_RESOURCE:
1966 case NFSERR_NOFILEHANDLE:
1967 /* do not increment the open seqid on these errors */
1968 return;
1969 }
1970 if (noop)
1971 noop->noo_seqid++;
1972 if (nlop)
1973 nlop->nlo_seqid++;
1974 }
1975
1976 /*
1977 * Search a node's open file list for any conflicts with this request.
1978 * Also find this open owner's open file structure.
1979 * If not found and "alloc" is set, then allocate one.
1980 */
1981 int
1982 nfs_open_file_find(
1983 nfsnode_t np,
1984 struct nfs_open_owner *noop,
1985 struct nfs_open_file **nofpp,
1986 uint32_t accessMode,
1987 uint32_t denyMode,
1988 int alloc)
1989 {
1990 *nofpp = NULL;
1991 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
1992 }
1993
1994 /*
1995 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
1996 * if an existing one is not found. This is used in "create" scenarios to
1997 * officially add the provisional nofp to the node once the node is created.
1998 */
1999 int
2000 nfs_open_file_find_internal(
2001 nfsnode_t np,
2002 struct nfs_open_owner *noop,
2003 struct nfs_open_file **nofpp,
2004 uint32_t accessMode,
2005 uint32_t denyMode,
2006 int alloc)
2007 {
2008 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2009
2010 if (!np)
2011 goto alloc;
2012 tryagain:
2013 lck_mtx_lock(&np->n_openlock);
2014 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2015 if (nofp2->nof_owner == noop) {
2016 nofp = nofp2;
2017 if (!accessMode)
2018 break;
2019 }
2020 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2021 /* This request conflicts with an existing open on this client. */
2022 lck_mtx_unlock(&np->n_openlock);
2023 return (EACCES);
2024 }
2025 }
2026
2027 /*
2028 * If this open owner doesn't have an open
2029 * file structure yet, we create one for it.
2030 */
2031 if (!nofp && !*nofpp && !newnofp && alloc) {
2032 lck_mtx_unlock(&np->n_openlock);
2033 alloc:
2034 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2035 if (!newnofp)
2036 return (ENOMEM);
2037 bzero(newnofp, sizeof(*newnofp));
2038 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2039 newnofp->nof_owner = noop;
2040 nfs_open_owner_ref(noop);
2041 newnofp->nof_np = np;
2042 lck_mtx_lock(&noop->noo_lock);
2043 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2044 lck_mtx_unlock(&noop->noo_lock);
2045 if (np)
2046 goto tryagain;
2047 }
2048 if (!nofp) {
2049 if (*nofpp) {
2050 (*nofpp)->nof_np = np;
2051 nofp = *nofpp;
2052 } else {
2053 nofp = newnofp;
2054 }
2055 if (nofp && np)
2056 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2057 }
2058 if (np)
2059 lck_mtx_unlock(&np->n_openlock);
2060
2061 if (alloc && newnofp && (nofp != newnofp))
2062 nfs_open_file_destroy(newnofp);
2063
2064 *nofpp = nofp;
2065 return (nofp ? 0 : ESRCH);
2066 }
2067
2068 /*
2069 * Destroy an open file structure.
2070 */
2071 void
2072 nfs_open_file_destroy(struct nfs_open_file *nofp)
2073 {
2074 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2075 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2076 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2077 nfs_open_owner_rele(nofp->nof_owner);
2078 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2079 FREE(nofp, M_TEMP);
2080 }
2081
2082 /*
2083 * Mark an open file as busy because we are about to
2084 * start an operation that uses and updates open file state.
2085 */
2086 int
2087 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2088 {
2089 struct nfsmount *nmp;
2090 struct timespec ts = {2, 0};
2091 int error = 0, slpflag;
2092
2093 nmp = nofp->nof_owner->noo_mount;
2094 if (nfs_mount_gone(nmp))
2095 return (ENXIO);
2096 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2097
2098 lck_mtx_lock(&nofp->nof_lock);
2099 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2100 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2101 break;
2102 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2103 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2104 slpflag = 0;
2105 }
2106 if (!error)
2107 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2108 lck_mtx_unlock(&nofp->nof_lock);
2109
2110 return (error);
2111 }
2112
2113 /*
2114 * Clear the busy flag on an open file and wake up anyone waiting
2115 * to mark it busy.
2116 */
2117 void
2118 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2119 {
2120 int wanted;
2121
2122 lck_mtx_lock(&nofp->nof_lock);
2123 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
2124 panic("nfs_open_file_clear_busy");
2125 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2126 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
2127 lck_mtx_unlock(&nofp->nof_lock);
2128 if (wanted)
2129 wakeup(nofp);
2130 }
2131
2132 /*
2133 * Add the open state for the given access/deny modes to this open file.
2134 */
2135 void
2136 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2137 {
2138 lck_mtx_lock(&nofp->nof_lock);
2139 nofp->nof_access |= accessMode;
2140 nofp->nof_deny |= denyMode;
2141
2142 if (delegated) {
2143 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2144 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2145 nofp->nof_d_r++;
2146 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2147 nofp->nof_d_w++;
2148 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2149 nofp->nof_d_rw++;
2150 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2151 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2152 nofp->nof_d_r_dw++;
2153 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2154 nofp->nof_d_w_dw++;
2155 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2156 nofp->nof_d_rw_dw++;
2157 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2158 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2159 nofp->nof_d_r_drw++;
2160 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2161 nofp->nof_d_w_drw++;
2162 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2163 nofp->nof_d_rw_drw++;
2164 }
2165 } else {
2166 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2167 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2168 nofp->nof_r++;
2169 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2170 nofp->nof_w++;
2171 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2172 nofp->nof_rw++;
2173 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2174 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2175 nofp->nof_r_dw++;
2176 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2177 nofp->nof_w_dw++;
2178 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2179 nofp->nof_rw_dw++;
2180 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2181 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2182 nofp->nof_r_drw++;
2183 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2184 nofp->nof_w_drw++;
2185 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2186 nofp->nof_rw_drw++;
2187 }
2188 }
2189
2190 nofp->nof_opencnt++;
2191 lck_mtx_unlock(&nofp->nof_lock);
2192 }
2193
2194 /*
2195 * Find which particular open combo will be closed and report what
2196 * the new modes will be and whether the open was delegated.
2197 */
2198 void
2199 nfs_open_file_remove_open_find(
2200 struct nfs_open_file *nofp,
2201 uint32_t accessMode,
2202 uint32_t denyMode,
2203 uint32_t *newAccessMode,
2204 uint32_t *newDenyMode,
2205 int *delegated)
2206 {
2207 /*
2208 * Calculate new modes: a mode bit gets removed when the corresponding
2209 * counts sum to exactly one, i.e. the open being removed is the last one using it
2210 */
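/*
 * Worked example: with one plain read/write open (nof_rw == 1) plus
 * the read-only open being removed (nof_r == 1), the READ sum below
 * is 2, so READ access is retained.  Only when the open being removed
 * is the last contributor (sum == 1) does the bit get cleared.
 */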
2211 *newAccessMode = nofp->nof_access;
2212 *newDenyMode = nofp->nof_deny;
2213
2214 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2215 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2216 ((nofp->nof_r + nofp->nof_d_r +
2217 nofp->nof_rw + nofp->nof_d_rw +
2218 nofp->nof_r_dw + nofp->nof_d_r_dw +
2219 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2220 nofp->nof_r_drw + nofp->nof_d_r_drw +
2221 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2222 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2223 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2224 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2225 ((nofp->nof_w + nofp->nof_d_w +
2226 nofp->nof_rw + nofp->nof_d_rw +
2227 nofp->nof_w_dw + nofp->nof_d_w_dw +
2228 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2229 nofp->nof_w_drw + nofp->nof_d_w_drw +
2230 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2231 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2232 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2233 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2234 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2235 nofp->nof_w_drw + nofp->nof_d_w_drw +
2236 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2237 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2238 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2239 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2240 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2241 nofp->nof_w_drw + nofp->nof_d_w_drw +
2242 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2243 nofp->nof_r_dw + nofp->nof_d_r_dw +
2244 nofp->nof_w_dw + nofp->nof_d_w_dw +
2245 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2246 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2247
2248 /* Find the corresponding open access/deny mode counter. */
2249 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2250 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2251 *delegated = (nofp->nof_d_r != 0);
2252 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2253 *delegated = (nofp->nof_d_w != 0);
2254 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2255 *delegated = (nofp->nof_d_rw != 0);
2256 else
2257 *delegated = 0;
2258 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2259 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2260 *delegated = (nofp->nof_d_r_dw != 0);
2261 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2262 *delegated = (nofp->nof_d_w_dw != 0);
2263 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2264 *delegated = (nofp->nof_d_rw_dw != 0);
2265 else
2266 *delegated = 0;
2267 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2268 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2269 *delegated = (nofp->nof_d_r_drw != 0);
2270 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2271 *delegated = (nofp->nof_d_w_drw != 0);
2272 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2273 *delegated = (nofp->nof_d_rw_drw != 0);
2274 else
2275 *delegated = 0;
2276 }
2277 }
2278
2279 /*
2280 * Remove the open state for the given access/deny modes from this open file.
2281 */
2282 void
2283 nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2284 {
2285 uint32_t newAccessMode, newDenyMode;
2286 int delegated = 0;
2287
2288 lck_mtx_lock(&nofp->nof_lock);
2289 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2290
2291 /* Decrement the corresponding open access/deny mode counter. */
2292 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2293 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2294 if (delegated) {
2295 if (nofp->nof_d_r == 0)
2296 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2297 else
2298 nofp->nof_d_r--;
2299 } else {
2300 if (nofp->nof_r == 0)
2301 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2302 else
2303 nofp->nof_r--;
2304 }
2305 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2306 if (delegated) {
2307 if (nofp->nof_d_w == 0)
2308 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2309 else
2310 nofp->nof_d_w--;
2311 } else {
2312 if (nofp->nof_w == 0)
2313 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2314 else
2315 nofp->nof_w--;
2316 }
2317 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2318 if (delegated) {
2319 if (nofp->nof_d_rw == 0)
2320 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2321 else
2322 nofp->nof_d_rw--;
2323 } else {
2324 if (nofp->nof_rw == 0)
2325 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2326 else
2327 nofp->nof_rw--;
2328 }
2329 }
2330 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2331 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2332 if (delegated) {
2333 if (nofp->nof_d_r_dw == 0)
2334 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2335 else
2336 nofp->nof_d_r_dw--;
2337 } else {
2338 if (nofp->nof_r_dw == 0)
2339 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2340 else
2341 nofp->nof_r_dw--;
2342 }
2343 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2344 if (delegated) {
2345 if (nofp->nof_d_w_dw == 0)
2346 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2347 else
2348 nofp->nof_d_w_dw--;
2349 } else {
2350 if (nofp->nof_w_dw == 0)
2351 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2352 else
2353 nofp->nof_w_dw--;
2354 }
2355 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2356 if (delegated) {
2357 if (nofp->nof_d_rw_dw == 0)
2358 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2359 else
2360 nofp->nof_d_rw_dw--;
2361 } else {
2362 if (nofp->nof_rw_dw == 0)
2363 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2364 else
2365 nofp->nof_rw_dw--;
2366 }
2367 }
2368 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2369 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2370 if (delegated) {
2371 if (nofp->nof_d_r_drw == 0)
2372 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2373 else
2374 nofp->nof_d_r_drw--;
2375 } else {
2376 if (nofp->nof_r_drw == 0)
2377 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2378 else
2379 nofp->nof_r_drw--;
2380 }
2381 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2382 if (delegated) {
2383 if (nofp->nof_d_w_drw == 0)
2384 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2385 else
2386 nofp->nof_d_w_drw--;
2387 } else {
2388 if (nofp->nof_w_drw == 0)
2389 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2390 else
2391 nofp->nof_w_drw--;
2392 }
2393 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2394 if (delegated) {
2395 if (nofp->nof_d_rw_drw == 0)
2396 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2397 else
2398 nofp->nof_d_rw_drw--;
2399 } else {
2400 if (nofp->nof_rw_drw == 0)
2401 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2402 else
2403 nofp->nof_rw_drw--;
2404 }
2405 }
2406 }
2407
2408 /* update the modes */
2409 nofp->nof_access = newAccessMode;
2410 nofp->nof_deny = newDenyMode;
2411 nofp->nof_opencnt--;
2412 lck_mtx_unlock(&nofp->nof_lock);
2413 }
2414
2415
2416 /*
2417 * Get the current (delegation, lock, open, default) stateid for this node.
2418 * If node has a delegation, use that stateid.
2419 * If pid has a lock, use the lockowner's stateid.
2420 * Or use the open file's stateid.
2421 * If no open file, use a default stateid of all ones.
2422 */
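/*
 * Note: the all-ones value used below when no state can be found is one
 * of the RFC 3530 "special" stateids, which a server should treat as a
 * request to bypass normal stateid checking rather than as a bogus ID.
 */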
2423 void
2424 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2425 {
2426 struct nfsmount *nmp = NFSTONMP(np);
2427 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2428 struct nfs_open_owner *noop = NULL;
2429 struct nfs_open_file *nofp = NULL;
2430 struct nfs_lock_owner *nlop = NULL;
2431 nfs_stateid *s = NULL;
2432
2433 if (np->n_openflags & N_DELEG_MASK) {
2434 s = &np->n_dstateid;
2435 } else {
2436 if (p)
2437 nlop = nfs_lock_owner_find(np, p, 0);
2438 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2439 /* we hold locks, use lock stateid */
2440 s = &nlop->nlo_stateid;
2441 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2442 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2443 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2444 nofp->nof_access) {
2445 /* we (should) have the file open, use open stateid */
2446 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
2447 nfs4_reopen(nofp, thd);
2448 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2449 s = &nofp->nof_stateid;
2450 }
2451 }
2452
2453 if (s) {
2454 sid->seqid = s->seqid;
2455 sid->other[0] = s->other[0];
2456 sid->other[1] = s->other[1];
2457 sid->other[2] = s->other[2];
2458 } else {
2459 /* named attributes may not have a stateid for reads, so don't complain for them */
2460 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
2461 NP(np, "nfs_get_stateid: no stateid");
2462 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2463 }
2464 if (nlop)
2465 nfs_lock_owner_rele(nlop);
2466 if (noop)
2467 nfs_open_owner_rele(noop);
2468 }
2469
2470
2471 /*
2472 * When we have a delegation, we may be able to perform the OPEN locally.
2473 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2474 */
2475 int
2476 nfs4_open_delegated(
2477 nfsnode_t np,
2478 struct nfs_open_file *nofp,
2479 uint32_t accessMode,
2480 uint32_t denyMode,
2481 vfs_context_t ctx)
2482 {
2483 int error = 0, ismember, readtoo = 0, authorized = 0;
2484 uint32_t action;
2485 struct kauth_acl_eval eval;
2486 kauth_cred_t cred = vfs_context_ucred(ctx);
2487
2488 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2489 /*
2490 * Try to open it for read access too,
2491 * so the buffer cache can read data.
2492 */
2493 readtoo = 1;
2494 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2495 }
2496
2497 tryagain:
2498 action = 0;
2499 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ)
2500 action |= KAUTH_VNODE_READ_DATA;
2501 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE)
2502 action |= KAUTH_VNODE_WRITE_DATA;
2503
2504 /* evaluate ACE (if we have one) */
2505 if (np->n_dace.ace_flags) {
2506 eval.ae_requested = action;
2507 eval.ae_acl = &np->n_dace;
2508 eval.ae_count = 1;
2509 eval.ae_options = 0;
2510 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred))
2511 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2512 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2513 if (!error && ismember)
2514 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2515
2516 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2517 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2518 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2519 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2520
2521 error = kauth_acl_evaluate(cred, &eval);
2522
2523 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW))
2524 authorized = 1;
2525 }
2526
2527 if (!authorized) {
2528 /* need to ask the server via ACCESS */
2529 struct vnop_access_args naa;
2530 naa.a_desc = &vnop_access_desc;
2531 naa.a_vp = NFSTOV(np);
2532 naa.a_action = action;
2533 naa.a_context = ctx;
2534 if (!(error = nfs_vnop_access(&naa)))
2535 authorized = 1;
2536 }
2537
2538 if (!authorized) {
2539 if (readtoo) {
2540 /* try again without the extra read access */
2541 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2542 readtoo = 0;
2543 goto tryagain;
2544 }
2545 return (error ? error : EACCES);
2546 }
2547
2548 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2549
2550 return (0);
2551 }
2552
2553
2554 /*
2555 * Open a file with the given access/deny modes.
2556 *
2557 * If we have a delegation, we may be able to handle the open locally.
2558 * Otherwise, we will always send the open RPC even if this open's mode is
2559 * a subset of all the existing opens. This makes sure that we will always
2560 * be able to do a downgrade to any of the open modes.
2561 *
2562 * Note: local conflicts should have already been checked in nfs_open_file_find().
2563 */
2564 int
2565 nfs4_open(
2566 nfsnode_t np,
2567 struct nfs_open_file *nofp,
2568 uint32_t accessMode,
2569 uint32_t denyMode,
2570 vfs_context_t ctx)
2571 {
2572 vnode_t vp = NFSTOV(np);
2573 vnode_t dvp = NULL;
2574 struct componentname cn;
2575 const char *vname = NULL;
2576 size_t namelen;
2577 char smallname[128];
2578 char *filename = NULL;
2579 int error = 0, readtoo = 0;
2580
2581 /*
2582 * We can handle the OPEN ourselves if we have a delegation,
2583 * unless it's a read delegation and the open is asking for
2584 * either write access or deny read. We also don't bother to
2585 * use the delegation if it's being returned.
2586 */
2587 if (np->n_openflags & N_DELEG_MASK) {
2588 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
2589 return (error);
2590 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2591 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2592 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2593 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2594 nfs_open_state_clear_busy(np);
2595 return (error);
2596 }
2597 nfs_open_state_clear_busy(np);
2598 }
2599
2600 /*
2601 * [sigh] We can't trust VFS to get the parent right for named
2602 * attribute nodes. (It likes to reparent the nodes after we've
2603 * created them.) Luckily we can probably get the right parent
2604 * from the n_parent we have stashed away.
2605 */
2606 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2607 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
2608 dvp = NULL;
2609 if (!dvp)
2610 dvp = vnode_getparent(vp);
2611 vname = vnode_getname(vp);
2612 if (!dvp || !vname) {
2613 if (!error)
2614 error = EIO;
2615 goto out;
2616 }
2617 filename = &smallname[0];
2618 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2619 if (namelen >= sizeof(smallname)) {
2620 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
2621 if (!filename) {
2622 error = ENOMEM;
2623 goto out;
2624 }
2625 snprintf(filename, namelen+1, "%s", vname);
2626 }
2627 bzero(&cn, sizeof(cn));
2628 cn.cn_nameptr = filename;
2629 cn.cn_namelen = namelen;
2630
2631 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2632 /*
2633 * Try to open it for read access too,
2634 * so the buffer cache can read data.
2635 */
2636 readtoo = 1;
2637 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2638 }
2639 tryagain:
2640 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2641 if (error) {
2642 if (!nfs_mount_state_error_should_restart(error) &&
2643 (error != EINTR) && (error != ERESTART) && readtoo) {
2644 /* try again without the extra read access */
2645 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2646 readtoo = 0;
2647 goto tryagain;
2648 }
2649 goto out;
2650 }
2651 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2652 out:
2653 if (filename && (filename != &smallname[0]))
2654 FREE(filename, M_TEMP);
2655 if (vname)
2656 vnode_putname(vname);
2657 if (dvp != NULLVP)
2658 vnode_put(dvp);
2659 return (error);
2660 }
2661
2662 int
2663 nfs_vnop_mmap(
2664 struct vnop_mmap_args /* {
2665 struct vnodeop_desc *a_desc;
2666 vnode_t a_vp;
2667 int a_fflags;
2668 vfs_context_t a_context;
2669 } */ *ap)
2670 {
2671 vfs_context_t ctx = ap->a_context;
2672 vnode_t vp = ap->a_vp;
2673 nfsnode_t np = VTONFS(vp);
2674 int error = 0, accessMode, denyMode, delegated;
2675 struct nfsmount *nmp;
2676 struct nfs_open_owner *noop = NULL;
2677 struct nfs_open_file *nofp = NULL;
2678
2679 nmp = VTONMP(vp);
2680 if (nfs_mount_gone(nmp))
2681 return (ENXIO);
2682
2683 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
2684 return (EINVAL);
2685 if (np->n_flag & NREVOKE)
2686 return (EIO);
2687
2688 /*
2689 * fflags contains some combination of: PROT_READ, PROT_WRITE
2690 * Since it's not possible to mmap() without having the file open for reading,
2691 * read access is always there (regardless of whether PROT_READ is set).
2692 */
2693 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2694 if (ap->a_fflags & PROT_WRITE)
2695 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2696 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2697
2698 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2699 if (!noop)
2700 return (ENOMEM);
2701
2702 restart:
2703 error = nfs_mount_state_in_use_start(nmp, NULL);
2704 if (error) {
2705 nfs_open_owner_rele(noop);
2706 return (error);
2707 }
2708 if (np->n_flag & NREVOKE) {
2709 error = EIO;
2710 nfs_mount_state_in_use_end(nmp, 0);
2711 nfs_open_owner_rele(noop);
2712 return (error);
2713 }
2714
2715 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2716 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2717 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2718 error = EPERM;
2719 }
2720 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2721 nfs_mount_state_in_use_end(nmp, 0);
2722 error = nfs4_reopen(nofp, NULL);
2723 nofp = NULL;
2724 if (!error)
2725 goto restart;
2726 }
2727 if (!error)
2728 error = nfs_open_file_set_busy(nofp, NULL);
2729 if (error) {
2730 nofp = NULL;
2731 goto out;
2732 }
2733
2734 /*
2735 * The open reference for mmap must mirror an existing open because
2736 * we may need to reclaim it after the file is closed.
2737 * So grab another open count matching the accessMode passed in.
2738 * If we already had an mmap open, prefer read/write without deny mode.
2739 * This means we may have to drop the current mmap open first.
2740 *
2741 * N.B. We should have an open for the mmap, because mmap was
2742 * called on an open descriptor, or we created an open for read
2743 * when reading the first page for execve. However, if we piggy-
2744 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2745 * open, that open may have closed.
2746 */
2747
2748 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2749 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2750 /* We shouldn't get here. We've already opened the file for execve. */
2751 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2752 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2753 }
2754 /*
2755 * mmappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2756 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2757 */
2758 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2759 /* not asking for just read access -> fail */
2760 error = EPERM;
2761 goto out;
2762 }
2763 /* we don't have the file open, so open it for read access */
2764 if (nmp->nm_vers < NFS_VER4) {
2765 /* NFS v2/v3 opens are always allowed - so just add it. */
2766 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
2767 error = 0;
2768 } else {
2769 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
2770 }
2771 if (!error)
2772 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
2773 if (error)
2774 goto out;
2775 }
2776
2777 /* determine deny mode for open */
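/*
 * The ladder below picks the least restrictive deny mode for which we
 * actually hold an open count, preferring delegated opens, so that the
 * extra open reference taken for the mmap mirrors a real open that can
 * later be reclaimed or closed.
 */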
2778 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2779 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2780 delegated = 1;
2781 if (nofp->nof_d_rw)
2782 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2783 else if (nofp->nof_d_rw_dw)
2784 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2785 else if (nofp->nof_d_rw_drw)
2786 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2787 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2788 delegated = 0;
2789 if (nofp->nof_rw)
2790 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2791 else if (nofp->nof_rw_dw)
2792 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2793 else if (nofp->nof_rw_drw)
2794 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2795 } else {
2796 error = EPERM;
2797 }
2798 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2799 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
2800 delegated = 1;
2801 if (nofp->nof_d_r)
2802 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2803 else if (nofp->nof_d_r_dw)
2804 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2805 else if (nofp->nof_d_r_drw)
2806 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2807 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
2808 delegated = 0;
2809 if (nofp->nof_r)
2810 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2811 else if (nofp->nof_r_dw)
2812 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2813 else if (nofp->nof_r_drw)
2814 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2815 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2816 /*
2817 * This clause and the one below are to co-opt a read/write access
2818 * for a read-only mmapping. We probably got here because an
2819 * existing rw open for an executable file already exists.
2820 */
2821 delegated = 1;
2822 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2823 if (nofp->nof_d_rw)
2824 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2825 else if (nofp->nof_d_rw_dw)
2826 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2827 else if (nofp->nof_d_rw_drw)
2828 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2829 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2830 delegated = 0;
2831 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2832 if (nofp->nof_rw)
2833 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2834 else if (nofp->nof_rw_dw)
2835 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2836 else if (nofp->nof_rw_drw)
2837 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2838 } else {
2839 error = EPERM;
2840 }
2841 }
2842 if (error) /* mmap mode without proper open mode */
2843 goto out;
2844
2845 /*
2846 * If the existing mmap access is more than the new access OR the
2847 * existing access is the same and the existing deny mode is less,
2848 * then we'll stick with the existing mmap open mode.
2849 */
2850 if ((nofp->nof_mmap_access > accessMode) ||
2851 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
2852 goto out;
2853
2854 /* update mmap open mode */
2855 if (nofp->nof_mmap_access) {
2856 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2857 if (error) {
2858 if (!nfs_mount_state_error_should_restart(error))
2859 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2860 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2861 goto out;
2862 }
2863 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2864 }
2865
2866 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
2867 nofp->nof_mmap_access = accessMode;
2868 nofp->nof_mmap_deny = denyMode;
2869
2870 out:
2871 if (nofp)
2872 nfs_open_file_clear_busy(nofp);
2873 if (nfs_mount_state_in_use_end(nmp, error)) {
2874 nofp = NULL;
2875 goto restart;
2876 }
2877 if (noop)
2878 nfs_open_owner_rele(noop);
2879
2880 if (!error) {
2881 int ismapped = 0;
2882 nfs_node_lock_force(np);
2883 if ((np->n_flag & NISMAPPED) == 0) {
2884 np->n_flag |= NISMAPPED;
2885 ismapped = 1;
2886 }
2887 nfs_node_unlock(np);
2888 if (ismapped) {
2889 lck_mtx_lock(&nmp->nm_lock);
2890 nmp->nm_state &= ~NFSSTA_SQUISHY;
2891 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
2892 if (nmp->nm_curdeadtimeout <= 0)
2893 nmp->nm_deadto_start = 0;
2894 nmp->nm_mappers++;
2895 lck_mtx_unlock(&nmp->nm_lock);
2896 }
2897 }
2898
2899 return (error);
2900 }
2901
2902
2903 int
2904 nfs_vnop_mnomap(
2905 struct vnop_mnomap_args /* {
2906 struct vnodeop_desc *a_desc;
2907 vnode_t a_vp;
2908 vfs_context_t a_context;
2909 } */ *ap)
2910 {
2911 vfs_context_t ctx = ap->a_context;
2912 vnode_t vp = ap->a_vp;
2913 nfsnode_t np = VTONFS(vp);
2914 struct nfsmount *nmp;
2915 struct nfs_open_file *nofp = NULL;
2916 off_t size;
2917 int error;
2918 int is_mapped_flag = 0;
2919
2920 nmp = VTONMP(vp);
2921 if (nfs_mount_gone(nmp))
2922 return (ENXIO);
2923
2924 nfs_node_lock_force(np);
2925 if (np->n_flag & NISMAPPED) {
2926 is_mapped_flag = 1;
2927 np->n_flag &= ~NISMAPPED;
2928 }
2929 nfs_node_unlock(np);
2930 if (is_mapped_flag) {
2931 lck_mtx_lock(&nmp->nm_lock);
2932 if (nmp->nm_mappers)
2933 nmp->nm_mappers--;
2934 else
2935 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2936 lck_mtx_unlock(&nmp->nm_lock);
2937 }
2938
2939 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2940 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
2941 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
2942 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
2943
2944 /* walk all open files and close all mmap opens */
2945 loop:
2946 error = nfs_mount_state_in_use_start(nmp, NULL);
2947 if (error)
2948 return (error);
2949 lck_mtx_lock(&np->n_openlock);
2950 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
2951 if (!nofp->nof_mmap_access)
2952 continue;
2953 lck_mtx_unlock(&np->n_openlock);
2954 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2955 nfs_mount_state_in_use_end(nmp, 0);
2956 error = nfs4_reopen(nofp, NULL);
2957 if (!error)
2958 goto loop;
2959 }
2960 if (!error)
2961 error = nfs_open_file_set_busy(nofp, NULL);
2962 if (error) {
2963 lck_mtx_lock(&np->n_openlock);
2964 break;
2965 }
2966 if (nofp->nof_mmap_access) {
2967 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2968 if (!nfs_mount_state_error_should_restart(error)) {
2969 if (error) /* not a state-operation-restarting error, so just clear the access */
2970 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2971 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2972 }
2973 if (error)
2974 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2975 }
2976 nfs_open_file_clear_busy(nofp);
2977 nfs_mount_state_in_use_end(nmp, error);
2978 goto loop;
2979 }
2980 lck_mtx_unlock(&np->n_openlock);
2981 nfs_mount_state_in_use_end(nmp, error);
2982 return (error);
2983 }
2984
2985 /*
2986 * Search a node's lock owner list for the owner for this process.
2987 * If not found and "alloc" is set, then allocate a new one.
2988 */
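/*
 * Note: pids can be reused, so a match on nlo_pid alone is not enough;
 * the process start time (nlo_pid_start) is compared as well, and an
 * entry left behind by a dead process is recycled or unlinked below.
 */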
2989 struct nfs_lock_owner *
2990 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
2991 {
2992 pid_t pid = proc_pid(p);
2993 struct nfs_lock_owner *nlop, *newnlop = NULL;
2994
2995 tryagain:
2996 lck_mtx_lock(&np->n_openlock);
2997 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
2998 if (nlop->nlo_pid != pid)
2999 continue;
3000 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
3001 break;
3002 /* stale lock owner... reuse it if we can */
3003 if (nlop->nlo_refcnt) {
3004 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3005 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3006 lck_mtx_unlock(&np->n_openlock);
3007 goto tryagain;
3008 }
3009 nlop->nlo_pid_start = p->p_start;
3010 nlop->nlo_seqid = 0;
3011 nlop->nlo_stategenid = 0;
3012 break;
3013 }
3014
3015 if (!nlop && !newnlop && alloc) {
3016 lck_mtx_unlock(&np->n_openlock);
3017 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3018 if (!newnlop)
3019 return (NULL);
3020 bzero(newnlop, sizeof(*newnlop));
3021 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3022 newnlop->nlo_pid = pid;
3023 newnlop->nlo_pid_start = p->p_start;
3024 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3025 TAILQ_INIT(&newnlop->nlo_locks);
3026 goto tryagain;
3027 }
3028 if (!nlop && newnlop) {
3029 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3030 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3031 nlop = newnlop;
3032 }
3033 lck_mtx_unlock(&np->n_openlock);
3034
3035 if (newnlop && (nlop != newnlop))
3036 nfs_lock_owner_destroy(newnlop);
3037
3038 if (nlop)
3039 nfs_lock_owner_ref(nlop);
3040
3041 return (nlop);
3042 }
3043
3044 /*
3045 * destroy a lock owner that's no longer needed
3046 */
3047 void
3048 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3049 {
3050 if (nlop->nlo_open_owner) {
3051 nfs_open_owner_rele(nlop->nlo_open_owner);
3052 nlop->nlo_open_owner = NULL;
3053 }
3054 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3055 FREE(nlop, M_TEMP);
3056 }
3057
3058 /*
3059 * acquire a reference count on a lock owner
3060 */
3061 void
3062 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3063 {
3064 lck_mtx_lock(&nlop->nlo_lock);
3065 nlop->nlo_refcnt++;
3066 lck_mtx_unlock(&nlop->nlo_lock);
3067 }
3068
3069 /*
3070 * drop a reference count on a lock owner and destroy it if
3071 * it is no longer referenced and no longer on the mount's list.
3072 */
3073 void
3074 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3075 {
3076 lck_mtx_lock(&nlop->nlo_lock);
3077 if (nlop->nlo_refcnt < 1)
3078 panic("nfs_lock_owner_rele: no refcnt");
3079 nlop->nlo_refcnt--;
3080 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3081 panic("nfs_lock_owner_rele: busy");
3082 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3083 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3084 lck_mtx_unlock(&nlop->nlo_lock);
3085 return;
3086 }
3087 /* owner is no longer referenced or linked to mount, so destroy it */
3088 lck_mtx_unlock(&nlop->nlo_lock);
3089 nfs_lock_owner_destroy(nlop);
3090 }
3091
3092 /*
3093 * Mark a lock owner as busy because we are about to
3094 * start an operation that uses and updates lock owner state.
3095 */
3096 int
3097 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3098 {
3099 struct nfsmount *nmp;
3100 struct timespec ts = {2, 0};
3101 int error = 0, slpflag;
3102
3103 nmp = nlop->nlo_open_owner->noo_mount;
3104 if (nfs_mount_gone(nmp))
3105 return (ENXIO);
3106 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3107
3108 lck_mtx_lock(&nlop->nlo_lock);
3109 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3110 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
3111 break;
3112 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3113 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3114 slpflag = 0;
3115 }
3116 if (!error)
3117 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3118 lck_mtx_unlock(&nlop->nlo_lock);
3119
3120 return (error);
3121 }
3122
3123 /*
3124 * Clear the busy flag on a lock owner and wake up anyone waiting
3125 * to mark it busy.
3126 */
3127 void
3128 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3129 {
3130 int wanted;
3131
3132 lck_mtx_lock(&nlop->nlo_lock);
3133 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3134 panic("nfs_lock_owner_clear_busy");
3135 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3136 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
3137 lck_mtx_unlock(&nlop->nlo_lock);
3138 if (wanted)
3139 wakeup(nlop);
3140 }
3141
3142 /*
3143 * Insert a held lock into a lock owner's sorted list.
3144 * (flock locks are always inserted at the head of the list)
3145 */
3146 void
3147 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3148 {
3149 struct nfs_file_lock *nflp;
3150
3151 /* insert new lock in lock owner's held lock list */
3152 lck_mtx_lock(&nlop->nlo_lock);
3153 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3154 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3155 } else {
3156 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3157 if (newnflp->nfl_start < nflp->nfl_start)
3158 break;
3159 }
3160 if (nflp)
3161 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3162 else
3163 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3164 }
3165 lck_mtx_unlock(&nlop->nlo_lock);
3166 }
3167
3168 /*
3169 * Get a file lock structure for this lock owner.
3170 */
3171 struct nfs_file_lock *
3172 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3173 {
3174 struct nfs_file_lock *nflp = NULL;
3175
3176 lck_mtx_lock(&nlop->nlo_lock);
3177 if (!nlop->nlo_alock.nfl_owner) {
3178 nflp = &nlop->nlo_alock;
3179 nflp->nfl_owner = nlop;
3180 }
3181 lck_mtx_unlock(&nlop->nlo_lock);
3182 if (!nflp) {
3183 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3184 if (!nflp)
3185 return (NULL);
3186 bzero(nflp, sizeof(*nflp));
3187 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3188 nflp->nfl_owner = nlop;
3189 }
3190 nfs_lock_owner_ref(nlop);
3191 return (nflp);
3192 }
3193
3194 /*
3195 * destroy the given NFS file lock structure
3196 */
3197 void
3198 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3199 {
3200 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3201
3202 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3203 nflp->nfl_owner = NULL;
3204 FREE(nflp, M_TEMP);
3205 } else {
3206 lck_mtx_lock(&nlop->nlo_lock);
3207 bzero(nflp, sizeof(*nflp));
3208 lck_mtx_unlock(&nlop->nlo_lock);
3209 }
3210 nfs_lock_owner_rele(nlop);
3211 }
3212
3213 /*
3214 * Check if one file lock conflicts with another.
3215 * (nflp1 is the new lock. nflp2 is the existing lock.)
3216 */
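/*
 * Worked example: if we hold a shared (F_RDLCK) POSIX lock over [0,99]
 * and the same owner requests an exclusive (F_WRLCK) lock over [10,19],
 * there is no conflict, but *willsplit is set because upgrading the
 * middle of the range will split the old lock into [0,9] and [20,99].
 */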
3217 int
3218 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3219 {
3220 /* no conflict if lock is dead */
3221 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
3222 return (0);
3223 /* no conflict if it's ours - unless the lock style doesn't match */
3224 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3225 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3226 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3227 (nflp1->nfl_start > nflp2->nfl_start) &&
3228 (nflp1->nfl_end < nflp2->nfl_end))
3229 *willsplit = 1;
3230 return (0);
3231 }
3232 /* no conflict if ranges don't overlap */
3233 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
3234 return (0);
3235 /* no conflict if neither lock is exclusive */
3236 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
3237 return (0);
3238 /* conflict */
3239 return (1);
3240 }
3241
3242 /*
3243 * Send an NFSv4 LOCK RPC to the server.
3244 */
3245 int
3246 nfs4_setlock_rpc(
3247 nfsnode_t np,
3248 struct nfs_open_file *nofp,
3249 struct nfs_file_lock *nflp,
3250 int reclaim,
3251 int flags,
3252 thread_t thd,
3253 kauth_cred_t cred)
3254 {
3255 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3256 struct nfsmount *nmp;
3257 struct nfsm_chain nmreq, nmrep;
3258 uint64_t xid;
3259 uint32_t locktype;
3260 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3261 struct nfsreq_secinfo_args si;
3262
3263 nmp = NFSTONMP(np);
3264 if (nfs_mount_gone(nmp))
3265 return (ENXIO);
3266 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3267 return (EINVAL);
3268
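/*
 * A lock owner with no state from the current state generation is a
 * "new locker": the LOCK request must then carry open-owner state
 * (open seqid + open stateid) so the server can establish lock state,
 * rather than an existing lock stateid (see the locker4 union in
 * RFC 3530).
 */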
3269 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3270 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3271 ((nflp->nfl_type == F_WRLCK) ?
3272 NFS_LOCK_TYPE_WRITEW :
3273 NFS_LOCK_TYPE_READW) :
3274 ((nflp->nfl_type == F_WRLCK) ?
3275 NFS_LOCK_TYPE_WRITE :
3276 NFS_LOCK_TYPE_READ);
3277 if (newlocker) {
3278 error = nfs_open_file_set_busy(nofp, thd);
3279 if (error)
3280 return (error);
3281 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3282 if (error) {
3283 nfs_open_file_clear_busy(nofp);
3284 return (error);
3285 }
3286 if (!nlop->nlo_open_owner) {
3287 nfs_open_owner_ref(nofp->nof_owner);
3288 nlop->nlo_open_owner = nofp->nof_owner;
3289 }
3290 }
3291 error = nfs_lock_owner_set_busy(nlop, thd);
3292 if (error) {
3293 if (newlocker) {
3294 nfs_open_owner_clear_busy(nofp->nof_owner);
3295 nfs_open_file_clear_busy(nofp);
3296 }
3297 return (error);
3298 }
3299
3300 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3301 nfsm_chain_null(&nmreq);
3302 nfsm_chain_null(&nmrep);
3303
3304 // PUTFH, GETATTR, LOCK
3305 numops = 3;
3306 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3307 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3308 numops--;
3309 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3310 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3311 numops--;
3312 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3313 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3314 numops--;
3315 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3316 nfsm_chain_add_32(error, &nmreq, locktype);
3317 nfsm_chain_add_32(error, &nmreq, reclaim);
3318 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
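/* Note: NFS_LOCK_LENGTH presumably converts the inclusive [start, end]
 * range kept locally into the NFSv4 (offset, length) form; an end of
 * UINT64_MAX should map to the all-ones length meaning "to EOF". */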
3319 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3320 nfsm_chain_add_32(error, &nmreq, newlocker);
3321 if (newlocker) {
3322 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3323 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3324 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3325 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3326 } else {
3327 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3328 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3329 }
3330 nfsm_chain_build_done(error, &nmreq);
3331 nfsm_assert(error, (numops == 0), EPROTO);
3332 nfsmout_if(error);
3333
3334 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3335
3336 if ((lockerror = nfs_node_lock(np)))
3337 error = lockerror;
3338 nfsm_chain_skip_tag(error, &nmrep);
3339 nfsm_chain_get_32(error, &nmrep, numops);
3340 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3341 nfsmout_if(error);
3342 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3343 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3344 nfsmout_if(error);
3345 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3346 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3347 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3348
3349 /* Update the lock owner's stategenid once it appears the server has state for it. */
3350 /* We determine this by noting the request was successful (we got a stateid). */
3351 if (newlocker && !error)
3352 nlop->nlo_stategenid = nmp->nm_stategenid;
3353 nfsmout:
3354 if (!lockerror)
3355 nfs_node_unlock(np);
3356 nfs_lock_owner_clear_busy(nlop);
3357 if (newlocker) {
3358 nfs_open_owner_clear_busy(nofp->nof_owner);
3359 nfs_open_file_clear_busy(nofp);
3360 }
3361 nfsm_chain_cleanup(&nmreq);
3362 nfsm_chain_cleanup(&nmrep);
3363 return (error);
3364 }
3365
3366 /*
3367 * Send an NFSv4 LOCKU RPC to the server.
3368 */
3369 int
3370 nfs4_unlock_rpc(
3371 nfsnode_t np,
3372 struct nfs_lock_owner *nlop,
3373 int type,
3374 uint64_t start,
3375 uint64_t end,
3376 int flags,
3377 thread_t thd,
3378 kauth_cred_t cred)
3379 {
3380 struct nfsmount *nmp;
3381 struct nfsm_chain nmreq, nmrep;
3382 uint64_t xid;
3383 int error = 0, lockerror = ENOENT, numops, status;
3384 struct nfsreq_secinfo_args si;
3385
3386 nmp = NFSTONMP(np);
3387 if (nfs_mount_gone(nmp))
3388 return (ENXIO);
3389 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3390 return (EINVAL);
3391
3392 error = nfs_lock_owner_set_busy(nlop, NULL);
3393 if (error)
3394 return (error);
3395
3396 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3397 nfsm_chain_null(&nmreq);
3398 nfsm_chain_null(&nmrep);
3399
3400 // PUTFH, GETATTR, LOCKU
3401 numops = 3;
3402 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3403 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3404 numops--;
3405 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3406 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3407 numops--;
3408 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3409 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3410 numops--;
3411 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3412 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3413 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3414 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3415 nfsm_chain_add_64(error, &nmreq, start);
3416 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3417 nfsm_chain_build_done(error, &nmreq);
3418 nfsm_assert(error, (numops == 0), EPROTO);
3419 nfsmout_if(error);
3420
3421 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3422
3423 if ((lockerror = nfs_node_lock(np)))
3424 error = lockerror;
3425 nfsm_chain_skip_tag(error, &nmrep);
3426 nfsm_chain_get_32(error, &nmrep, numops);
3427 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3428 nfsmout_if(error);
3429 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3430 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3431 nfsmout_if(error);
3432 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3433 nfs_owner_seqid_increment(NULL, nlop, error);
3434 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3435 nfsmout:
3436 if (!lockerror)
3437 nfs_node_unlock(np);
3438 nfs_lock_owner_clear_busy(nlop);
3439 nfsm_chain_cleanup(&nmreq);
3440 nfsm_chain_cleanup(&nmrep);
3441 return (error);
3442 }
3443
3444 /*
3445 * Send an NFSv4 LOCKT RPC to the server.
3446 */
3447 int
3448 nfs4_getlock_rpc(
3449 nfsnode_t np,
3450 struct nfs_lock_owner *nlop,
3451 struct flock *fl,
3452 uint64_t start,
3453 uint64_t end,
3454 vfs_context_t ctx)
3455 {
3456 struct nfsmount *nmp;
3457 struct nfsm_chain nmreq, nmrep;
3458 uint64_t xid, val64 = 0;
3459 uint32_t val = 0;
3460 int error = 0, lockerror, numops, status;
3461 struct nfsreq_secinfo_args si;
3462
3463 nmp = NFSTONMP(np);
3464 if (nfs_mount_gone(nmp))
3465 return (ENXIO);
3466 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3467 return (EINVAL);
3468
3469 lockerror = ENOENT;
3470 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3471 nfsm_chain_null(&nmreq);
3472 nfsm_chain_null(&nmrep);
3473
3474 // PUTFH, GETATTR, LOCKT
3475 numops = 3;
3476 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3477 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3478 numops--;
3479 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3480 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3481 numops--;
3482 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3483 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3484 numops--;
3485 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3486 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3487 nfsm_chain_add_64(error, &nmreq, start);
3488 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3489 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3490 nfsm_chain_build_done(error, &nmreq);
3491 nfsm_assert(error, (numops == 0), EPROTO);
3492 nfsmout_if(error);
3493
3494 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3495
3496 if ((lockerror = nfs_node_lock(np)))
3497 error = lockerror;
3498 nfsm_chain_skip_tag(error, &nmrep);
3499 nfsm_chain_get_32(error, &nmrep, numops);
3500 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3501 nfsmout_if(error);
3502 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3503 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3504 nfsmout_if(error);
3505 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
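	/*
	 * On NFSERR_DENIED the reply describes the conflicting lock as an
	 * (offset, length, locktype, owner) tuple, which we translate back into
	 * the caller's struct flock. A length of UINT64_MAX means "to the end
	 * of the file" and maps to the flock convention l_len == 0; e.g. a
	 * conflicting whole-file write lock comes back as offset 0, length
	 * UINT64_MAX and becomes { l_start = 0, l_len = 0, l_type = F_WRLCK }.
	 */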
3506 if (error == NFSERR_DENIED) {
3507 error = 0;
3508 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3509 nfsm_chain_get_64(error, &nmrep, val64);
3510 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3511 nfsm_chain_get_32(error, &nmrep, val);
3512 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3513 fl->l_pid = 0;
3514 fl->l_whence = SEEK_SET;
3515 } else if (!error) {
3516 fl->l_type = F_UNLCK;
3517 }
3518 nfsmout:
3519 if (!lockerror)
3520 nfs_node_unlock(np);
3521 nfsm_chain_cleanup(&nmreq);
3522 nfsm_chain_cleanup(&nmrep);
3523 return (error);
3524 }
3525
3526
3527 /*
3528 * Check for any conflicts with the given lock.
3529 *
3530 * Checking for a lock doesn't require the file to be opened.
3531 * So we skip all the open-owner, open-file, and lock-owner work
3532 * and just check for a conflicting lock.
3533 */
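/*
 * The local scan below uses the usual closed-interval overlap test:
 * [start,end] and [nfl_start,nfl_end] overlap iff
 *
 *	(start <= nfl_end) && (end >= nfl_start)
 *
 * and an overlap only conflicts when at least one side is a write lock.
 * E.g. a held F_WRLCK [0,99] conflicts with a queried F_RDLCK [50,149],
 * but two read locks never conflict.
 */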
3534 int
3535 nfs_advlock_getlock(
3536 nfsnode_t np,
3537 struct nfs_lock_owner *nlop,
3538 struct flock *fl,
3539 uint64_t start,
3540 uint64_t end,
3541 vfs_context_t ctx)
3542 {
3543 struct nfsmount *nmp;
3544 struct nfs_file_lock *nflp;
3545 int error = 0, answered = 0;
3546
3547 nmp = NFSTONMP(np);
3548 if (nfs_mount_gone(nmp))
3549 return (ENXIO);
3550
3551 restart:
3552 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx))))
3553 return (error);
3554
3555 lck_mtx_lock(&np->n_openlock);
3556 /* scan currently held locks for conflict */
3557 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3558 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3559 continue;
3560 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3561 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
3562 break;
3563 }
3564 if (nflp) {
3565 /* found a conflicting lock */
3566 fl->l_type = nflp->nfl_type;
3567 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3568 fl->l_start = nflp->nfl_start;
3569 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3570 fl->l_whence = SEEK_SET;
3571 answered = 1;
3572 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3573 /*
3574 * If we have a write delegation, we know there can't be other
3575 * locks on the server. So the answer is no conflicting lock found.
3576 */
3577 fl->l_type = F_UNLCK;
3578 answered = 1;
3579 }
3580 lck_mtx_unlock(&np->n_openlock);
3581 if (answered) {
3582 nfs_mount_state_in_use_end(nmp, 0);
3583 return (0);
3584 }
3585
3586 /* no conflict found locally, so ask the server */
3587 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3588
3589 if (nfs_mount_state_in_use_end(nmp, error))
3590 goto restart;
3591 return (error);
3592 }
3593
3594 /*
3595 * Acquire a file lock for the given range.
3596 *
3597 * Add the lock (request) to the lock queue.
3598 * Scan the lock queue for any conflicting locks.
3599 * If a conflict is found, block or return an error.
3600 * Once the end of the queue is reached, send the request to the server.
3601 * If the server grants the lock, scan the lock queue and
3602 * update any existing locks. Then (optionally) scan the
3603 * queue again to coalesce any locks adjacent to the new one.
3604 */
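/*
 * Illustrative caller's view (a sketch; the exact VFS plumbing is assumed):
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 40,
 *	    .l_len = 20, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * arrives here via nfs_vnop_advlock() as op == F_SETLKW, start == 40,
 * end == 59 (ranges are inclusive), style == NFS_FILE_LOCK_STYLE_POSIX.
 */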
3605 int
3606 nfs_advlock_setlock(
3607 nfsnode_t np,
3608 struct nfs_open_file *nofp,
3609 struct nfs_lock_owner *nlop,
3610 int op,
3611 uint64_t start,
3612 uint64_t end,
3613 int style,
3614 short type,
3615 vfs_context_t ctx)
3616 {
3617 struct nfsmount *nmp;
3618 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3619 struct nfs_file_lock *coalnflp;
3620 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3621 struct timespec ts = {1, 0};
3622
3623 nmp = NFSTONMP(np);
3624 if (nfs_mount_gone(nmp))
3625 return (ENXIO);
3626 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3627
3628 if ((type != F_RDLCK) && (type != F_WRLCK))
3629 return (EINVAL);
3630
3631 /* allocate a new lock */
3632 newnflp = nfs_file_lock_alloc(nlop);
3633 if (!newnflp)
3634 return (ENOLCK);
3635 newnflp->nfl_start = start;
3636 newnflp->nfl_end = end;
3637 newnflp->nfl_type = type;
3638 if (op == F_SETLKW)
3639 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3640 newnflp->nfl_flags |= style;
3641 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
3642
3643 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3644 /*
3645 * For exclusive flock-style locks, if we block waiting for the
3646 * lock, we need to first release any currently held shared
3647 * flock-style lock. So, the first thing we do is check if we
3648 * have a shared flock-style lock.
3649 */
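	/*
	 * E.g. a process holding flock(fd, LOCK_SH) that now requests
	 * flock(fd, LOCK_EX) must drop the shared lock before blocking,
	 * or two processes upgrading at once would deadlock on each
	 * other's shared locks.
	 */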
3650 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3651 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
3652 nflp = NULL;
3653 if (nflp && (nflp->nfl_type != F_RDLCK))
3654 nflp = NULL;
3655 flocknflp = nflp;
3656 }
3657
3658 restart:
3659 restart = 0;
3660 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3661 if (error)
3662 goto error_out;
3663 inuse = 1;
3664 if (np->n_flag & NREVOKE) {
3665 error = EIO;
3666 nfs_mount_state_in_use_end(nmp, 0);
3667 inuse = 0;
3668 goto error_out;
3669 }
3670 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3671 nfs_mount_state_in_use_end(nmp, 0);
3672 inuse = 0;
3673 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3674 if (error)
3675 goto error_out;
3676 goto restart;
3677 }
3678
3679 lck_mtx_lock(&np->n_openlock);
3680 if (!inqueue) {
3681 /* insert new lock at beginning of list */
3682 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3683 inqueue = 1;
3684 }
3685
3686 /* scan current list of locks (held and pending) for conflicts */
3687 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3688 nextnflp = TAILQ_NEXT(nflp, nfl_link);
3689 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
3690 continue;
3691 /* Conflict */
3692 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3693 error = EAGAIN;
3694 break;
3695 }
3696 /* Block until this lock is no longer held. */
3697 if (nflp->nfl_blockcnt == UINT_MAX) {
3698 error = ENOLCK;
3699 break;
3700 }
3701 nflp->nfl_blockcnt++;
3702 do {
3703 if (flocknflp) {
3704 /* release any currently held shared lock before sleeping */
3705 lck_mtx_unlock(&np->n_openlock);
3706 nfs_mount_state_in_use_end(nmp, 0);
3707 inuse = 0;
3708 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3709 flocknflp = NULL;
3710 if (!error)
3711 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3712 if (error) {
3713 lck_mtx_lock(&np->n_openlock);
3714 break;
3715 }
3716 inuse = 1;
3717 lck_mtx_lock(&np->n_openlock);
3718 /* no need to block/sleep if the conflict is gone */
3719 if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
3720 break;
3721 }
3722 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
3723 slpflag = 0;
3724 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3725 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3726 /* looks like we have a recover pending... restart */
3727 restart = 1;
3728 lck_mtx_unlock(&np->n_openlock);
3729 nfs_mount_state_in_use_end(nmp, 0);
3730 inuse = 0;
3731 lck_mtx_lock(&np->n_openlock);
3732 break;
3733 }
3734 if (!error && (np->n_flag & NREVOKE))
3735 error = EIO;
3736 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
3737 nflp->nfl_blockcnt--;
3738 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
3739 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3740 nfs_file_lock_destroy(nflp);
3741 }
3742 if (error || restart)
3743 break;
3744 /* We released n_openlock while blocked, so we can't trust that nextnflp is still valid. */
3745 /* Restart the scan from where it began: just after our new lock at the head of the list. */
3746 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
3747 }
3748 lck_mtx_unlock(&np->n_openlock);
3749 if (restart)
3750 goto restart;
3751 if (error)
3752 goto error_out;
3753
3754 if (willsplit) {
3755 /*
3756 * It looks like this operation is splitting a lock.
3757 * We allocate a new lock now so we don't have to worry
3758 * about the allocation failing after we've updated some state.
3759 */
3760 nflp2 = nfs_file_lock_alloc(nlop);
3761 if (!nflp2) {
3762 error = ENOLCK;
3763 goto error_out;
3764 }
3765 }
3766
3767 /* once scan for local conflicts is clear, send request to server */
3768 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
3769 goto error_out;
3770 busy = 1;
3771 delay = 0;
3772 do {
3773 /* do we have a delegation? (that we're not returning?) */
3774 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
3775 if (np->n_openflags & N_DELEG_WRITE) {
3776 /* with a write delegation, just take the lock delegated */
3777 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
3778 error = 0;
3779 /* make sure the lock owner knows its open owner */
3780 if (!nlop->nlo_open_owner) {
3781 nfs_open_owner_ref(nofp->nof_owner);
3782 nlop->nlo_open_owner = nofp->nof_owner;
3783 }
3784 break;
3785 } else {
3786 /*
3787 * If we don't have any non-delegated opens but we do have
3788 * delegated opens, then we need to first claim the delegated
3789 * opens so that the lock request on the server can be associated
3790 * with an open it knows about.
3791 */
3792 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
3793 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
3794 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
3795 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
3796 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
3797 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
3798 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
3799 if (error)
3800 break;
3801 }
3802 }
3803 }
3804 if (np->n_flag & NREVOKE)
3805 error = EIO;
3806 if (!error)
3807 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
3808 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
3809 break;
3810 /* request was denied due to either conflict or grace period */
3811 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3812 error = EAGAIN;
3813 break;
3814 }
3815 if (flocknflp) {
3816 /* release any currently held shared lock before sleeping */
3817 nfs_open_state_clear_busy(np);
3818 busy = 0;
3819 nfs_mount_state_in_use_end(nmp, 0);
3820 inuse = 0;
3821 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3822 flocknflp = NULL;
3823 if (!error2)
3824 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3825 if (!error2) {
3826 inuse = 1;
3827 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
3828 }
3829 if (error2) {
3830 error = error2;
3831 break;
3832 }
3833 busy = 1;
3834 }
3835 /*
3836 * Wait a little bit and send the request again.
3837 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
3838 */
3839 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
3840 if (error == NFSERR_GRACE)
3841 delay = 4;
3842 if (delay < 4)
3843 delay++;
3844 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2));
3845 slpflag = 0;
3846 }
3847 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3848 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3849 /* looks like we have a recover pending... restart */
3850 nfs_open_state_clear_busy(np);
3851 busy = 0;
3852 nfs_mount_state_in_use_end(nmp, 0);
3853 inuse = 0;
3854 goto restart;
3855 }
3856 if (!error && (np->n_flag & NREVOKE))
3857 error = EIO;
3858 } while (!error);
3859
3860 error_out:
3861 if (nfs_mount_state_error_should_restart(error)) {
3862 /* looks like we need to restart this operation */
3863 if (busy) {
3864 nfs_open_state_clear_busy(np);
3865 busy = 0;
3866 }
3867 if (inuse) {
3868 nfs_mount_state_in_use_end(nmp, error);
3869 inuse = 0;
3870 }
3871 goto restart;
3872 }
3873 lck_mtx_lock(&np->n_openlock);
3874 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
3875 if (error) {
3876 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3877 if (newnflp->nfl_blockcnt) {
3878 /* wake up anyone blocked on this lock */
3879 wakeup(newnflp);
3880 } else {
3881 /* remove newnflp from lock list and destroy */
3882 if (inqueue)
3883 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
3884 nfs_file_lock_destroy(newnflp);
3885 }
3886 lck_mtx_unlock(&np->n_openlock);
3887 if (busy)
3888 nfs_open_state_clear_busy(np);
3889 if (inuse)
3890 nfs_mount_state_in_use_end(nmp, error);
3891 if (nflp2)
3892 nfs_file_lock_destroy(nflp2);
3893 return (error);
3894 }
3895
3896 /* server granted the lock */
3897
3898 /*
3899 * Scan for locks to update.
3900 *
3901 * Locks completely covered are killed.
3902 * At most two locks may need to be clipped.
3903 * It's possible that a single lock may need to be split.
3904 */
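	/*
	 * For example, once the server grants a new lock over [10,90]:
	 *
	 *	held [20,80]  - entirely covered: marked DEAD and removed
	 *	held [0,99]   - split: clipped to [0,9], nflp2 takes [91,99]
	 *	held [0,50]   - end clipped: becomes [0,9]
	 *	held [50,120] - start clipped: becomes [91,120]
	 */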
3905 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3906 if (nflp == newnflp)
3907 continue;
3908 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3909 continue;
3910 if (nflp->nfl_owner != nlop)
3911 continue;
3912 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
3913 continue;
3914 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
3915 continue;
3916 /* here's one to update */
3917 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
3918 /* The entire lock is being replaced. */
3919 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3920 lck_mtx_lock(&nlop->nlo_lock);
3921 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3922 lck_mtx_unlock(&nlop->nlo_lock);
3923 /* lock will be destroyed below, if no waiters */
3924 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
3925 /* We're replacing a range in the middle of a lock. */
3926 /* The current lock will be split into two locks. */
3927 /* Update locks and insert new lock after current lock. */
3928 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
3929 nflp2->nfl_type = nflp->nfl_type;
3930 nflp2->nfl_start = newnflp->nfl_end + 1;
3931 nflp2->nfl_end = nflp->nfl_end;
3932 nflp->nfl_end = newnflp->nfl_start - 1;
3933 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
3934 nfs_lock_owner_insert_held_lock(nlop, nflp2);
3935 nextnflp = nflp2;
3936 nflp2 = NULL;
3937 } else if (newnflp->nfl_start > nflp->nfl_start) {
3938 /* We're replacing the end of a lock. */
3939 nflp->nfl_end = newnflp->nfl_start - 1;
3940 } else if (newnflp->nfl_end < nflp->nfl_end) {
3941 /* We're replacing the start of a lock. */
3942 nflp->nfl_start = newnflp->nfl_end + 1;
3943 }
3944 if (nflp->nfl_blockcnt) {
3945 /* wake up anyone blocked on this lock */
3946 wakeup(nflp);
3947 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
3948 /* remove nflp from lock list and destroy */
3949 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3950 nfs_file_lock_destroy(nflp);
3951 }
3952 }
3953
3954 nfs_lock_owner_insert_held_lock(nlop, newnflp);
3955
3956 /*
3957 * POSIX locks should be coalesced when possible.
3958 */
3959 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
3960 /*
3961 * Walk through the lock queue and check each of our held locks with
3962 * the previous and next locks in the lock owner's "held lock list".
3963 * If the two locks can be coalesced, we merge the current lock into
3964 * the other (previous or next) lock. Merging this way makes sure that
3965 * lock ranges are always merged forward in the lock queue. This is
3966 * important because anyone blocked on the lock being "merged away"
3967 * will still need to block on that range and it will simply continue
3968 * checking locks that are further down the list.
3969 */
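		/*
		 * E.g. if this owner now holds POSIX read locks [0,39] and [40,99],
		 * the earlier lock absorbs the later one: [0,39] grows to [0,99]
		 * and the [40,99] entry is marked DEAD; anyone blocked on it just
		 * keeps checking locks further down the queue.
		 */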
3970 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3971 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3972 continue;
3973 if (nflp->nfl_owner != nlop)
3974 continue;
3975 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
3976 continue;
3977 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
3978 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3979 (coalnflp->nfl_type == nflp->nfl_type) &&
3980 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
3981 coalnflp->nfl_end = nflp->nfl_end;
3982 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3983 lck_mtx_lock(&nlop->nlo_lock);
3984 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3985 lck_mtx_unlock(&nlop->nlo_lock);
3986 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
3987 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3988 (coalnflp->nfl_type == nflp->nfl_type) &&
3989 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
3990 coalnflp->nfl_start = nflp->nfl_start;
3991 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3992 lck_mtx_lock(&nlop->nlo_lock);
3993 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3994 lck_mtx_unlock(&nlop->nlo_lock);
3995 }
3996 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
3997 continue;
3998 if (nflp->nfl_blockcnt) {
3999 /* wake up anyone blocked on this lock */
4000 wakeup(nflp);
4001 } else {
4002 /* remove nflp from lock list and destroy */
4003 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4004 nfs_file_lock_destroy(nflp);
4005 }
4006 }
4007 }
4008
4009 lck_mtx_unlock(&np->n_openlock);
4010 nfs_open_state_clear_busy(np);
4011 nfs_mount_state_in_use_end(nmp, error);
4012
4013 if (nflp2)
4014 nfs_file_lock_destroy(nflp2);
4015 return (error);
4016 }
4017
4018 /*
4019 * Release all (same style) locks within the given range.
4020 */
4021 int
4022 nfs_advlock_unlock(
4023 nfsnode_t np,
4024 struct nfs_open_file *nofp,
4025 struct nfs_lock_owner *nlop,
4026 uint64_t start,
4027 uint64_t end,
4028 int style,
4029 vfs_context_t ctx)
4030 {
4031 struct nfsmount *nmp;
4032 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4033 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4034
4035 nmp = NFSTONMP(np);
4036 if (nfs_mount_gone(nmp))
4037 return (ENXIO);
4038
4039 restart:
4040 if ((error = nfs_mount_state_in_use_start(nmp, NULL)))
4041 return (error);
4042 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4043 nfs_mount_state_in_use_end(nmp, 0);
4044 error = nfs4_reopen(nofp, NULL);
4045 if (error)
4046 return (error);
4047 goto restart;
4048 }
4049 if ((error = nfs_open_state_set_busy(np, NULL))) {
4050 nfs_mount_state_in_use_end(nmp, error);
4051 return (error);
4052 }
4053
4054 lck_mtx_lock(&np->n_openlock);
4055 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4056 /*
4057 * We may need to allocate a new lock if an existing lock gets split.
4058 * So, we first scan the list to check for a split, and if there's
4059 * going to be one, we'll allocate one now.
4060 */
4061 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4062 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4063 continue;
4064 if (nflp->nfl_owner != nlop)
4065 continue;
4066 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4067 continue;
4068 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4069 continue;
4070 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4071 willsplit = 1;
4072 break;
4073 }
4074 }
4075 if (willsplit) {
4076 lck_mtx_unlock(&np->n_openlock);
4077 nfs_open_state_clear_busy(np);
4078 nfs_mount_state_in_use_end(nmp, 0);
4079 newnflp = nfs_file_lock_alloc(nlop);
4080 if (!newnflp)
4081 return (ENOMEM);
4082 goto restart;
4083 }
4084 }
4085
4086 /*
4087 * Free all of our locks in the given range.
4088 *
4089 * Note that this process requires sending requests to the server.
4090 * Because of this, we will release the n_openlock while performing
4091 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4092 * locks from changing underneath us. However, other entries in the
4093 * list may be removed. So we need to be careful walking the list.
4094 */
4095
4096 /*
4097 * Don't unlock ranges that are held by other-style locks.
4098 * If the style is POSIX, don't send any unlock RPCs while a flock-style lock is held.
4099 * If we unlock a flock-style lock, don't send unlock RPCs for any POSIX-style
4100 * ranges still held - instead send unlocks for the ranges NOT held.
4101 */
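	/*
	 * E.g. unlocking a whole-file flock-style lock while this owner also
	 * holds (non-delegated) POSIX locks [100,199] and [500,599] sends
	 * unlock RPCs only for the uncovered ranges: [0,99], [200,499], and
	 * [600,UINT64_MAX].
	 */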
4102 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4103 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4104 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
4105 send_unlock_rpcs = 0;
4106 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4107 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4108 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4109 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4110 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4111 uint64_t s = 0;
4112 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4113 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4114 while (!delegated && nflp) {
4115 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4116 /* unlock the range preceding this lock */
4117 lck_mtx_unlock(&np->n_openlock);
4118 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0,
4119 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4120 if (nfs_mount_state_error_should_restart(error)) {
4121 nfs_open_state_clear_busy(np);
4122 nfs_mount_state_in_use_end(nmp, error);
4123 goto restart;
4124 }
4125 lck_mtx_lock(&np->n_openlock);
4126 if (error)
4127 goto out;
4128 s = nflp->nfl_end+1;
4129 }
4130 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4131 }
4132 if (!delegated) {
4133 lck_mtx_unlock(&np->n_openlock);
4134 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4135 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4136 if (nfs_mount_state_error_should_restart(error)) {
4137 nfs_open_state_clear_busy(np);
4138 nfs_mount_state_in_use_end(nmp, error);
4139 goto restart;
4140 }
4141 lck_mtx_lock(&np->n_openlock);
4142 if (error)
4143 goto out;
4144 }
4145 send_unlock_rpcs = 0;
4146 }
4147
4148 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4149 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4150 continue;
4151 if (nflp->nfl_owner != nlop)
4152 continue;
4153 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4154 continue;
4155 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4156 continue;
4157 /* here's one to unlock */
4158 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4159 /* The entire lock is being unlocked. */
4160 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4161 lck_mtx_unlock(&np->n_openlock);
4162 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4163 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4164 if (nfs_mount_state_error_should_restart(error)) {
4165 nfs_open_state_clear_busy(np);
4166 nfs_mount_state_in_use_end(nmp, error);
4167 goto restart;
4168 }
4169 lck_mtx_lock(&np->n_openlock);
4170 }
4171 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4172 if (error)
4173 break;
4174 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4175 lck_mtx_lock(&nlop->nlo_lock);
4176 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4177 lck_mtx_unlock(&nlop->nlo_lock);
4178 /* lock will be destroyed below, if no waiters */
4179 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4180 /* We're unlocking a range in the middle of a lock. */
4181 /* The current lock will be split into two locks. */
4182 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4183 lck_mtx_unlock(&np->n_openlock);
4184 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4185 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4186 if (nfs_mount_state_error_should_restart(error)) {
4187 nfs_open_state_clear_busy(np);
4188 nfs_mount_state_in_use_end(nmp, error);
4189 goto restart;
4190 }
4191 lck_mtx_lock(&np->n_openlock);
4192 }
4193 if (error)
4194 break;
4195 /* update locks and insert new lock after current lock */
4196 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
4197 newnflp->nfl_type = nflp->nfl_type;
4198 newnflp->nfl_start = end + 1;
4199 newnflp->nfl_end = nflp->nfl_end;
4200 nflp->nfl_end = start - 1;
4201 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4202 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4203 nextnflp = newnflp;
4204 newnflp = NULL;
4205 } else if (start > nflp->nfl_start) {
4206 /* We're unlocking the end of a lock. */
4207 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4208 lck_mtx_unlock(&np->n_openlock);
4209 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4210 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4211 if (nfs_mount_state_error_should_restart(error)) {
4212 nfs_open_state_clear_busy(np);
4213 nfs_mount_state_in_use_end(nmp, error);
4214 goto restart;
4215 }
4216 lck_mtx_lock(&np->n_openlock);
4217 }
4218 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4219 if (error)
4220 break;
4221 nflp->nfl_end = start - 1;
4222 } else if (end < nflp->nfl_end) {
4223 /* We're unlocking the start of a lock. */
4224 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4225 lck_mtx_unlock(&np->n_openlock);
4226 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4227 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4228 if (nfs_mount_state_error_should_restart(error)) {
4229 nfs_open_state_clear_busy(np);
4230 nfs_mount_state_in_use_end(nmp, error);
4231 goto restart;
4232 }
4233 lck_mtx_lock(&np->n_openlock);
4234 }
4235 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4236 if (error)
4237 break;
4238 nflp->nfl_start = end + 1;
4239 }
4240 if (nflp->nfl_blockcnt) {
4241 /* wake up anyone blocked on this lock */
4242 wakeup(nflp);
4243 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4244 /* remove nflp from lock list and destroy */
4245 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4246 nfs_file_lock_destroy(nflp);
4247 }
4248 }
4249 out:
4250 lck_mtx_unlock(&np->n_openlock);
4251 nfs_open_state_clear_busy(np);
4252 nfs_mount_state_in_use_end(nmp, 0);
4253
4254 if (newnflp)
4255 nfs_file_lock_destroy(newnflp);
4256 return (error);
4257 }
4258
4259 /*
4260 * NFSv4 advisory file locking
4261 */
4262 int
4263 nfs_vnop_advlock(
4264 struct vnop_advlock_args /* {
4265 struct vnodeop_desc *a_desc;
4266 vnode_t a_vp;
4267 caddr_t a_id;
4268 int a_op;
4269 struct flock *a_fl;
4270 int a_flags;
4271 vfs_context_t a_context;
4272 } */ *ap)
4273 {
4274 vnode_t vp = ap->a_vp;
4275 nfsnode_t np = VTONFS(ap->a_vp);
4276 struct flock *fl = ap->a_fl;
4277 int op = ap->a_op;
4278 int flags = ap->a_flags;
4279 vfs_context_t ctx = ap->a_context;
4280 struct nfsmount *nmp;
4281 struct nfs_open_owner *noop = NULL;
4282 struct nfs_open_file *nofp = NULL;
4283 struct nfs_lock_owner *nlop = NULL;
4284 off_t lstart;
4285 uint64_t start, end;
4286 int error = 0, modified, style;
4287 enum vtype vtype;
4288 #define OFF_MAX QUAD_MAX
4289
4290 nmp = VTONMP(ap->a_vp);
4291 if (nfs_mount_gone(nmp))
4292 return (ENXIO);
4293 lck_mtx_lock(&nmp->nm_lock);
4294 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4295 lck_mtx_unlock(&nmp->nm_lock);
4296 return (ENOTSUP);
4297 }
4298 lck_mtx_unlock(&nmp->nm_lock);
4299
4300 if (np->n_flag & NREVOKE)
4301 return (EIO);
4302 vtype = vnode_vtype(ap->a_vp);
4303 if (vtype == VDIR) /* ignore lock requests on directories */
4304 return (0);
4305 if (vtype != VREG) /* anything other than regular files is invalid */
4306 return (EINVAL);
4307
4308 /* Convert the flock structure into a start and end. */
4309 switch (fl->l_whence) {
4310 case SEEK_SET:
4311 case SEEK_CUR:
4312 /*
4313 * Caller is responsible for adding any necessary offset
4314 * to fl->l_start when SEEK_CUR is used.
4315 */
4316 lstart = fl->l_start;
4317 break;
4318 case SEEK_END:
4319 /* need to flush and refetch attributes to make */
4320 /* sure we have the correct end-of-file offset */
4321 if ((error = nfs_node_lock(np)))
4322 return (error);
4323 modified = (np->n_flag & NMODIFIED);
4324 nfs_node_unlock(np);
4325 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
4326 return (error);
4327 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED)))
4328 return (error);
4329 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4330 if ((np->n_size > OFF_MAX) ||
4331 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
4332 error = EOVERFLOW;
4333 lstart = np->n_size + fl->l_start;
4334 nfs_data_unlock(np);
4335 if (error)
4336 return (error);
4337 break;
4338 default:
4339 return (EINVAL);
4340 }
4341 if (lstart < 0)
4342 return (EINVAL);
4343 start = lstart;
4344 if (fl->l_len == 0) {
4345 end = UINT64_MAX;
4346 } else if (fl->l_len > 0) {
4347 if ((fl->l_len - 1) > (OFF_MAX - lstart))
4348 return (EOVERFLOW);
4349 end = start - 1 + fl->l_len;
4350 } else { /* l_len is negative */
4351 if ((lstart + fl->l_len) < 0)
4352 return (EINVAL);
4353 end = start - 1;
4354 start += fl->l_len;
4355 }
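	/*
	 * Examples of the conversion (the resulting range is inclusive):
	 *
	 *	l_start = 100, l_len = 0   -> [100, UINT64_MAX]  (to end of file)
	 *	l_start = 100, l_len = 50  -> [100, 149]
	 *	l_start = 100, l_len = -50 -> [50, 99]
	 */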
4356 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX))))
4357 return (EINVAL);
4358
4359 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4360 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
4361 return (EINVAL);
4362
4363 /* find the lock owner; allocate one unless this is an unlock */
4364 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4365 if (!nlop) {
4366 error = (op == F_UNLCK) ? 0 : ENOMEM;
4367 if (error)
4368 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4369 goto out;
4370 }
4371
4372 if (op == F_GETLK) {
4373 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4374 } else {
4375 /* find the open owner */
4376 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4377 if (!noop) {
4378 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4379 error = EPERM;
4380 goto out;
4381 }
4382 /* find the open file */
4383 restart:
4384 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4385 if (error)
4386 error = EBADF;
4387 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4388 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4389 error = EIO;
4390 }
4391 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4392 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4393 nofp = NULL;
4394 if (!error)
4395 goto restart;
4396 }
4397 if (error) {
4398 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4399 goto out;
4400 }
4401 if (op == F_UNLCK) {
4402 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4403 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4404 if ((op == F_SETLK) && (flags & F_WAIT))
4405 op = F_SETLKW;
4406 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4407 } else {
4408 /* not getlk, unlock or lock? */
4409 error = EINVAL;
4410 }
4411 }
4412
4413 out:
4414 if (nlop)
4415 nfs_lock_owner_rele(nlop);
4416 if (noop)
4417 nfs_open_owner_rele(noop);
4418 return (error);
4419 }
4420
4421 /*
4422 * Check if an open owner holds any locks on a file.
4423 */
4424 int
4425 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4426 {
4427 struct nfs_lock_owner *nlop;
4428
4429 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4430 if (nlop->nlo_open_owner != noop)
4431 continue;
4432 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4433 break;
4434 }
4435 return (nlop ? 1 : 0);
4436 }
4437
4438 /*
4439 * Reopen simple (no deny, no locks) open state that was lost.
4440 */
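/*
 * Only one thread reopens at a time (NFS_OPEN_FILE_REOPENING); any others
 * wait for it to finish. The reopen itself just replays plain OPENs for
 * whichever access modes are held (read-write, write, read), all with
 * deny-none.
 */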
4441 int
4442 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4443 {
4444 struct nfs_open_owner *noop = nofp->nof_owner;
4445 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4446 nfsnode_t np = nofp->nof_np;
4447 vnode_t vp = NFSTOV(np);
4448 vnode_t dvp = NULL;
4449 struct componentname cn;
4450 const char *vname = NULL;
4451 const char *name = NULL;
4452 size_t namelen;
4453 char smallname[128];
4454 char *filename = NULL;
4455 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4456 struct timespec ts = { 1, 0 };
4457
4458 lck_mtx_lock(&nofp->nof_lock);
4459 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4460 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
4461 break;
4462 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
4463 slpflag = 0;
4464 }
4465 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4466 lck_mtx_unlock(&nofp->nof_lock);
4467 return (error);
4468 }
4469 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4470 lck_mtx_unlock(&nofp->nof_lock);
4471
4472 nfs_node_lock_force(np);
4473 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4474 /*
4475 * The node's been sillyrenamed, so we need to use
4476 * the sillyrename directory/name to do the open.
4477 */
4478 struct nfs_sillyrename *nsp = np->n_sillyrename;
4479 dvp = NFSTOV(nsp->nsr_dnp);
4480 if ((error = vnode_get(dvp))) {
4481 nfs_node_unlock(np);
4482 goto out;
4483 }
4484 name = nsp->nsr_name;
4485 } else {
4486 /*
4487 * [sigh] We can't trust VFS to get the parent right for named
4488 * attribute nodes. (It likes to reparent the nodes after we've
4489 * created them.) Luckily we can probably get the right parent
4490 * from the n_parent we have stashed away.
4491 */
4492 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4493 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4494 dvp = NULL;
4495 if (!dvp)
4496 dvp = vnode_getparent(vp);
4497 vname = vnode_getname(vp);
4498 if (!dvp || !vname) {
4499 if (!error)
4500 error = EIO;
4501 nfs_node_unlock(np);
4502 goto out;
4503 }
4504 name = vname;
4505 }
4506 filename = &smallname[0];
4507 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4508 if (namelen >= sizeof(smallname)) {
4509 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
4510 if (!filename) {
4511 error = ENOMEM;
4512 goto out;
4513 }
4514 snprintf(filename, namelen+1, "%s", name);
4515 }
4516 nfs_node_unlock(np);
4517 bzero(&cn, sizeof(cn));
4518 cn.cn_nameptr = filename;
4519 cn.cn_namelen = namelen;
4520
4521 restart:
4522 done = 0;
4523 if ((error = nfs_mount_state_in_use_start(nmp, thd)))
4524 goto out;
4525
4526 if (nofp->nof_rw)
4527 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4528 if (!error && nofp->nof_w)
4529 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4530 if (!error && nofp->nof_r)
4531 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4532
4533 if (nfs_mount_state_in_use_end(nmp, error)) {
4534 if (error == NFSERR_GRACE)
4535 goto restart;
4536 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4537 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4538 error = 0;
4539 goto out;
4540 }
4541 done = 1;
4542 out:
4543 if (error && (error != EINTR) && (error != ERESTART))
4544 nfs_revoke_open_state_for_node(np);
4545 lck_mtx_lock(&nofp->nof_lock);
4546 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4547 if (done)
4548 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4549 else if (error)
4550 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4551 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4552 lck_mtx_unlock(&nofp->nof_lock);
4553 if (filename && (filename != &smallname[0]))
4554 FREE(filename, M_TEMP);
4555 if (vname)
4556 vnode_putname(vname);
4557 if (dvp != NULLVP)
4558 vnode_put(dvp);
4559 return (error);
4560 }
4561
4562 /*
4563 * Send a normal OPEN RPC to open/create a file.
4564 */
4565 int
4566 nfs4_open_rpc(
4567 struct nfs_open_file *nofp,
4568 vfs_context_t ctx,
4569 struct componentname *cnp,
4570 struct vnode_attr *vap,
4571 vnode_t dvp,
4572 vnode_t *vpp,
4573 int create,
4574 int share_access,
4575 int share_deny)
4576 {
4577 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4578 cnp, vap, dvp, vpp, create, share_access, share_deny));
4579 }
4580
4581 /*
4582 * Send an OPEN RPC to reopen a file.
4583 */
4584 int
4585 nfs4_open_reopen_rpc(
4586 struct nfs_open_file *nofp,
4587 thread_t thd,
4588 kauth_cred_t cred,
4589 struct componentname *cnp,
4590 vnode_t dvp,
4591 vnode_t *vpp,
4592 int share_access,
4593 int share_deny)
4594 {
4595 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny));
4596 }
4597
4598 /*
4599 * Send an OPEN_CONFIRM RPC to confirm an OPEN (done when the server sets NFS_OPEN_RESULT_CONFIRM in the OPEN result flags).
4600 */
4601 int
4602 nfs4_open_confirm_rpc(
4603 struct nfsmount *nmp,
4604 nfsnode_t dnp,
4605 u_char *fhp,
4606 int fhlen,
4607 struct nfs_open_owner *noop,
4608 nfs_stateid *sid,
4609 thread_t thd,
4610 kauth_cred_t cred,
4611 struct nfs_vattr *nvap,
4612 uint64_t *xidp)
4613 {
4614 struct nfsm_chain nmreq, nmrep;
4615 int error = 0, status, numops;
4616 struct nfsreq_secinfo_args si;
4617
4618 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
4619 nfsm_chain_null(&nmreq);
4620 nfsm_chain_null(&nmrep);
4621
4622 // PUTFH, OPEN_CONFIRM, GETATTR
4623 numops = 3;
4624 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
4625 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
4626 numops--;
4627 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4628 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
4629 numops--;
4630 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
4631 nfsm_chain_add_stateid(error, &nmreq, sid);
4632 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4633 numops--;
4634 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4635 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4636 nfsm_chain_build_done(error, &nmreq);
4637 nfsm_assert(error, (numops == 0), EPROTO);
4638 nfsmout_if(error);
4639 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
4640
4641 nfsm_chain_skip_tag(error, &nmrep);
4642 nfsm_chain_get_32(error, &nmrep, numops);
4643 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4644 nfsmout_if(error);
4645 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
4646 nfs_owner_seqid_increment(noop, NULL, error);
4647 nfsm_chain_get_stateid(error, &nmrep, sid);
4648 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4649 nfsmout_if(error);
4650 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
4651 nfsmout:
4652 nfsm_chain_cleanup(&nmreq);
4653 nfsm_chain_cleanup(&nmrep);
4654 return (error);
4655 }
4656
4657 /*
4658 * common OPEN RPC code
4659 *
4660 * If create is set, ctx must be passed in.
4661 * On success, returns a new node in *vpp if none was passed in.
4662 */
4663 int
4664 nfs4_open_rpc_internal(
4665 struct nfs_open_file *nofp,
4666 vfs_context_t ctx,
4667 thread_t thd,
4668 kauth_cred_t cred,
4669 struct componentname *cnp,
4670 struct vnode_attr *vap,
4671 vnode_t dvp,
4672 vnode_t *vpp,
4673 int create,
4674 int share_access,
4675 int share_deny)
4676 {
4677 struct nfsmount *nmp;
4678 struct nfs_open_owner *noop = nofp->nof_owner;
4679 struct nfs_vattr nvattr;
4680 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
4681 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
4682 u_int64_t xid, savedxid = 0;
4683 nfsnode_t dnp = VTONFS(dvp);
4684 nfsnode_t np, newnp = NULL;
4685 vnode_t newvp = NULL;
4686 struct nfsm_chain nmreq, nmrep;
4687 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
4688 uint32_t rflags, delegation, recall;
4689 struct nfs_stateid stateid, dstateid, *sid;
4690 fhandle_t fh;
4691 struct nfsreq rq, *req = &rq;
4692 struct nfs_dulookup dul;
4693 char sbuf[64], *s;
4694 uint32_t ace_type, ace_flags, ace_mask, len, slen;
4695 struct kauth_ace ace;
4696 struct nfsreq_secinfo_args si;
4697
4698 if (create && !ctx)
4699 return (EINVAL);
4700
4701 nmp = VTONMP(dvp);
4702 if (nfs_mount_gone(nmp))
4703 return (ENXIO);
4704 nfsvers = nmp->nm_vers;
4705 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4706 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
4707 return (EINVAL);
4708
4709 np = *vpp ? VTONFS(*vpp) : NULL;
4710 if (create && vap) {
4711 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
4712 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4713 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4714 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4715 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)))
4716 vap->va_vaflags |= VA_UTIMES_NULL;
4717 } else {
4718 exclusive = gotuid = gotgid = 0;
4719 }
4720 if (nofp) {
4721 sid = &nofp->nof_stateid;
4722 } else {
4723 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
4724 sid = &stateid;
4725 }
4726
4727 if ((error = nfs_open_owner_set_busy(noop, thd)))
4728 return (error);
4729 again:
4730 rflags = delegation = recall = 0;
4731 ace.ace_flags = 0;
4732 s = sbuf;
4733 slen = sizeof(sbuf);
4734 NVATTR_INIT(&nvattr);
4735 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
4736
4737 nfsm_chain_null(&nmreq);
4738 nfsm_chain_null(&nmrep);
4739
4740 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
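	/*
	 * The directory is the current filehandle: SAVEFH stashes it so that,
	 * after the OPEN and a GETATTR of the opened file (with the FILEHANDLE
	 * attribute requested so we learn the new file's FH), RESTOREFH can
	 * bring the directory back for a final GETATTR of its post-op attributes.
	 */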
4741 numops = 6;
4742 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
4743 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
4744 numops--;
4745 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4746 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4747 numops--;
4748 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
4749 numops--;
4750 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
4751 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4752 nfsm_chain_add_32(error, &nmreq, share_access);
4753 nfsm_chain_add_32(error, &nmreq, share_deny);
4754 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
4755 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
4756 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
4757 nfsm_chain_add_32(error, &nmreq, create);
4758 if (create) {
4759 if (exclusive) {
4760 static uint32_t create_verf; // XXX need a better verifier
4761 create_verf++;
4762 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4763 /* insert 64 bit verifier */
4764 nfsm_chain_add_32(error, &nmreq, create_verf);
4765 nfsm_chain_add_32(error, &nmreq, create_verf);
4766 } else {
4767 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4768 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
4769 }
4770 }
4771 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
4772 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4773 numops--;
4774 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4775 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
4776 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
4777 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
4778 numops--;
4779 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
4780 numops--;
4781 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4782 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4783 nfsm_chain_build_done(error, &nmreq);
4784 nfsm_assert(error, (numops == 0), EPROTO);
4785 if (!error)
4786 error = busyerror = nfs_node_set_busy(dnp, thd);
4787 nfsmout_if(error);
4788
4789 if (create && !namedattrs)
4790 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4791
4792 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
4793 if (!error) {
4794 if (create && !namedattrs)
4795 nfs_dulookup_start(&dul, dnp, ctx);
4796 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4797 savedxid = xid;
4798 }
4799
4800 if (create && !namedattrs)
4801 nfs_dulookup_finish(&dul, dnp, ctx);
4802
4803 if ((lockerror = nfs_node_lock(dnp)))
4804 error = lockerror;
4805 nfsm_chain_skip_tag(error, &nmrep);
4806 nfsm_chain_get_32(error, &nmrep, numops);
4807 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4808 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
4809 nfsmout_if(error);
4810 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
4811 nfs_owner_seqid_increment(noop, NULL, error);
4812 nfsm_chain_get_stateid(error, &nmrep, sid);
4813 nfsm_chain_check_change_info(error, &nmrep, dnp);
4814 nfsm_chain_get_32(error, &nmrep, rflags);
4815 bmlen = NFS_ATTR_BITMAP_LEN;
4816 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
4817 nfsm_chain_get_32(error, &nmrep, delegation);
4818 if (!error)
4819 switch (delegation) {
4820 case NFS_OPEN_DELEGATE_NONE:
4821 break;
4822 case NFS_OPEN_DELEGATE_READ:
4823 case NFS_OPEN_DELEGATE_WRITE:
4824 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
4825 nfsm_chain_get_32(error, &nmrep, recall);
4826 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the space limit: limitby + 2 words in either arm XXX
4827 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
4828 /* if we have any trouble accepting the ACE, just invalidate it */
4829 ace_type = ace_flags = ace_mask = len = 0;
4830 nfsm_chain_get_32(error, &nmrep, ace_type);
4831 nfsm_chain_get_32(error, &nmrep, ace_flags);
4832 nfsm_chain_get_32(error, &nmrep, ace_mask);
4833 nfsm_chain_get_32(error, &nmrep, len);
4834 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
4835 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
4836 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
4837 if (!error && (len >= slen)) {
4838 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
4839 if (s)
4840 slen = len+1;
4841 else
4842 ace.ace_flags = 0;
4843 }
4844 if (s)
4845 nfsm_chain_get_opaque(error, &nmrep, len, s);
4846 else
4847 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
4848 if (!error && s) {
4849 s[len] = '\0';
4850 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
4851 ace.ace_flags = 0;
4852 }
4853 if (error || !s)
4854 ace.ace_flags = 0;
4855 if (s && (s != sbuf))
4856 FREE(s, M_TEMP);
4857 break;
4858 default:
4859 error = EBADRPC;
4860 break;
4861 }
4862 /* At this point if we have no error, the object was created/opened. */
4863 open_error = error;
4864 nfsmout_if(error);
4865 if (create && vap && !exclusive)
4866 nfs_vattr_set_supported(bitmap, vap);
4867 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4868 nfsmout_if(error);
4869 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
4870 nfsmout_if(error);
4871 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
4872 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
4873 error = EBADRPC;
4874 goto nfsmout;
4875 }
4876 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4877 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4878 // Solaris Named Attributes may do this due to a bug... so don't warn for named attributes.
4879 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
4880 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
4881 }
4882 /* directory attributes: if we don't get them, make sure to invalidate */
4883 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
4884 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4885 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
4886 if (error)
4887 NATTRINVALIDATE(dnp);
4888 nfsmout_if(error);
4889
4890 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
4891 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
4892
4893 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
4894 nfs_node_unlock(dnp);
4895 lockerror = ENOENT;
4896 NVATTR_CLEANUP(&nvattr);
4897 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
4898 nfsmout_if(error);
4899 savedxid = xid;
4900 if ((lockerror = nfs_node_lock(dnp)))
4901 error = lockerror;
4902 }
4903
4904 nfsmout:
4905 nfsm_chain_cleanup(&nmreq);
4906 nfsm_chain_cleanup(&nmrep);
4907
4908 if (!lockerror && create) {
4909 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
4910 dnp->n_flag &= ~NNEGNCENTRIES;
4911 cache_purge_negatives(dvp);
4912 }
4913 dnp->n_flag |= NMODIFIED;
4914 nfs_node_unlock(dnp);
4915 lockerror = ENOENT;
4916 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4917 }
4918 if (!lockerror)
4919 nfs_node_unlock(dnp);
4920 if (!error && !np && fh.fh_len) {
4921 /* create the vnode with the filehandle and attributes */
4922 xid = savedxid;
4923 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
4924 if (!error)
4925 newvp = NFSTOV(newnp);
4926 }
4927 NVATTR_CLEANUP(&nvattr);
4928 if (!busyerror)
4929 nfs_node_clear_busy(dnp);
4930 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
4931 if (!np)
4932 np = newnp;
4933 if (!error && np && !recall) {
4934 /* stuff the delegation state in the node */
4935 lck_mtx_lock(&np->n_openlock);
4936 np->n_openflags &= ~N_DELEG_MASK;
4937 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4938 np->n_dstateid = dstateid;
4939 np->n_dace = ace;
4940 if (np->n_dlink.tqe_next == NFSNOLIST) {
4941 lck_mtx_lock(&nmp->nm_lock);
4942 if (np->n_dlink.tqe_next == NFSNOLIST)
4943 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4944 lck_mtx_unlock(&nmp->nm_lock);
4945 }
4946 lck_mtx_unlock(&np->n_openlock);
4947 } else {
4948 /* give the delegation back */
4949 if (np) {
4950 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4951 /* update delegation state and return it */
4952 lck_mtx_lock(&np->n_openlock);
4953 np->n_openflags &= ~N_DELEG_MASK;
4954 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4955 np->n_dstateid = dstateid;
4956 np->n_dace = ace;
4957 if (np->n_dlink.tqe_next == NFSNOLIST) {
4958 lck_mtx_lock(&nmp->nm_lock);
4959 if (np->n_dlink.tqe_next == NFSNOLIST)
4960 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4961 lck_mtx_unlock(&nmp->nm_lock);
4962 }
4963 lck_mtx_unlock(&np->n_openlock);
4964 /* don't need to send a separate delegreturn for fh */
4965 fh.fh_len = 0;
4966 }
4967 /* return np's current delegation */
4968 nfs4_delegation_return(np, 0, thd, cred);
4969 }
4970 if (fh.fh_len) /* return fh's delegation if it wasn't for np */
4971 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
4972 }
4973 }
4974 if (error) {
4975 if (exclusive && (error == NFSERR_NOTSUPP)) {
4976 exclusive = 0;
4977 goto again;
4978 }
4979 if (newvp) {
4980 nfs_node_unlock(newnp);
4981 vnode_put(newvp);
4982 }
4983 } else if (create) {
4984 nfs_node_unlock(newnp);
4985 if (exclusive) {
4986 error = nfs4_setattr_rpc(newnp, vap, ctx);
4987 if (error && (gotuid || gotgid)) {
4988 /* it's possible the server didn't like our attempt to set IDs. */
4989 /* so, let's try it again without those */
4990 VATTR_CLEAR_ACTIVE(vap, va_uid);
4991 VATTR_CLEAR_ACTIVE(vap, va_gid);
4992 error = nfs4_setattr_rpc(newnp, vap, ctx);
4993 }
4994 }
4995 if (error)
4996 vnode_put(newvp);
4997 else
4998 *vpp = newvp;
4999 }
5000 nfs_open_owner_clear_busy(noop);
5001 return (error);
5002 }
5003
5004
5005 /*
5006 * Send an OPEN RPC to claim a delegated open for a file
5007 */
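/*
* In outline, the compound built below is:
*
*     PUTFH(parent dir), OPEN(CLAIM_DELEGATE_CUR: delegation stateid +
*         name), GETATTR(incl. FILEHANDLE)
*
* i.e. an open that was performed locally under a delegation is turned
* into a real server open by presenting the delegation stateid along
* with the directory/name of the file (typically while the delegation
* is being recalled).
*/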
5008 int
5009 nfs4_claim_delegated_open_rpc(
5010 struct nfs_open_file *nofp,
5011 int share_access,
5012 int share_deny,
5013 int flags)
5014 {
5015 struct nfsmount *nmp;
5016 struct nfs_open_owner *noop = nofp->nof_owner;
5017 struct nfs_vattr nvattr;
5018 int error = 0, lockerror = ENOENT, status;
5019 int nfsvers, numops;
5020 u_int64_t xid;
5021 nfsnode_t np = nofp->nof_np;
5022 struct nfsm_chain nmreq, nmrep;
5023 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5024 uint32_t rflags = 0, delegation, recall = 0;
5025 fhandle_t fh;
5026 struct nfs_stateid dstateid;
5027 char sbuf[64], *s = sbuf;
5028 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5029 struct kauth_ace ace;
5030 vnode_t dvp = NULL;
5031 const char *vname = NULL;
5032 const char *name = NULL;
5033 size_t namelen;
5034 char smallname[128];
5035 char *filename = NULL;
5036 struct nfsreq_secinfo_args si;
5037
5038 nmp = NFSTONMP(np);
5039 if (nfs_mount_gone(nmp))
5040 return (ENXIO);
5041 nfsvers = nmp->nm_vers;
5042
5043 nfs_node_lock_force(np);
5044 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5045 /*
5046 * The node's been sillyrenamed, so we need to use
5047 * the sillyrename directory/name to do the open.
5048 */
5049 struct nfs_sillyrename *nsp = np->n_sillyrename;
5050 dvp = NFSTOV(nsp->nsr_dnp);
5051 if ((error = vnode_get(dvp))) {
5052 nfs_node_unlock(np);
5053 goto out;
5054 }
5055 name = nsp->nsr_name;
5056 } else {
5057 /*
5058 * [sigh] We can't trust VFS to get the parent right for named
5059 * attribute nodes. (It likes to reparent the nodes after we've
5060 * created them.) Luckily we can probably get the right parent
5061 * from the n_parent we have stashed away.
5062 */
5063 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5064 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
5065 dvp = NULL;
5066 if (!dvp)
5067 dvp = vnode_getparent(NFSTOV(np));
5068 vname = vnode_getname(NFSTOV(np));
5069 if (!dvp || !vname) {
5070 if (!error)
5071 error = EIO;
5072 nfs_node_unlock(np);
5073 goto out;
5074 }
5075 name = vname;
5076 }
5077 filename = &smallname[0];
5078 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5079 if (namelen >= sizeof(smallname)) {
5080 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
5081 if (!filename) {
5082 error = ENOMEM;
5083 nfs_node_unlock(np);
5084 goto out;
5085 }
5086 snprintf(filename, namelen+1, "%s", name);
5087 }
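/*
* From here on, "filename" is either the on-stack smallname[] or a
* MALLOC'd copy; the out: path at the bottom frees only the latter.
*/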
5088 nfs_node_unlock(np);
5089
5090 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5091 goto out;
5092 NVATTR_INIT(&nvattr);
5093 delegation = NFS_OPEN_DELEGATE_NONE;
5094 dstateid = np->n_dstateid;
5095 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5096
5097 nfsm_chain_null(&nmreq);
5098 nfsm_chain_null(&nmrep);
5099
5100 // PUTFH, OPEN, GETATTR(FH)
5101 numops = 3;
5102 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5103 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5104 numops--;
5105 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5106 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5107 numops--;
5108 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5109 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5110 nfsm_chain_add_32(error, &nmreq, share_access);
5111 nfsm_chain_add_32(error, &nmreq, share_deny);
5112 // open owner: clientid + uid
5113 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5114 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5115 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5116 // openflag4
5117 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5118 // open_claim4
5119 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5120 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5121 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5122 numops--;
5123 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5124 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5125 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5126 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5127 nfsm_chain_build_done(error, &nmreq);
5128 nfsm_assert(error, (numops == 0), EPROTO);
5129 nfsmout_if(error);
5130
5131 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5132 noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5133
5134 if ((lockerror = nfs_node_lock(np)))
5135 error = lockerror;
5136 nfsm_chain_skip_tag(error, &nmrep);
5137 nfsm_chain_get_32(error, &nmrep, numops);
5138 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5139 nfsmout_if(error);
5140 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5141 nfs_owner_seqid_increment(noop, NULL, error);
5142 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5143 nfsm_chain_check_change_info(error, &nmrep, np);
5144 nfsm_chain_get_32(error, &nmrep, rflags);
5145 bmlen = NFS_ATTR_BITMAP_LEN;
5146 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5147 nfsm_chain_get_32(error, &nmrep, delegation);
5148 if (!error)
5149 switch (delegation) {
5150 case NFS_OPEN_DELEGATE_NONE:
5151 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5152 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5153 break;
5154 case NFS_OPEN_DELEGATE_READ:
5155 case NFS_OPEN_DELEGATE_WRITE:
5156 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5157 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5158 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5159 (delegation == NFS_OPEN_DELEGATE_READ)))
5160 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5161 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5162 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5163 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5164 nfsm_chain_get_32(error, &nmrep, recall);
5165 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space_limit (limitby + limit, 3 words)
5166 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5167 /* if we have any trouble accepting the ACE, just invalidate it */
5168 ace_type = ace_flags = ace_mask = len = 0;
5169 nfsm_chain_get_32(error, &nmrep, ace_type);
5170 nfsm_chain_get_32(error, &nmrep, ace_flags);
5171 nfsm_chain_get_32(error, &nmrep, ace_mask);
5172 nfsm_chain_get_32(error, &nmrep, len);
5173 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5174 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5175 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5176 if (!error && (len >= slen)) {
5177 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5178 if (s)
5179 slen = len+1;
5180 else
5181 ace.ace_flags = 0;
5182 }
5183 if (s)
5184 nfsm_chain_get_opaque(error, &nmrep, len, s);
5185 else
5186 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5187 if (!error && s) {
5188 s[len] = '\0';
5189 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5190 ace.ace_flags = 0;
5191 }
5192 if (error || !s)
5193 ace.ace_flags = 0;
5194 if (s && (s != sbuf))
5195 FREE(s, M_TEMP);
5196 if (!error) {
5197 /* stuff the latest delegation state in the node */
5198 lck_mtx_lock(&np->n_openlock);
5199 np->n_openflags &= ~N_DELEG_MASK;
5200 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5201 np->n_dstateid = dstateid;
5202 np->n_dace = ace;
5203 if (np->n_dlink.tqe_next == NFSNOLIST) {
5204 lck_mtx_lock(&nmp->nm_lock);
5205 if (np->n_dlink.tqe_next == NFSNOLIST)
5206 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5207 lck_mtx_unlock(&nmp->nm_lock);
5208 }
5209 lck_mtx_unlock(&np->n_openlock);
5210 }
5211 break;
5212 default:
5213 error = EBADRPC;
5214 break;
5215 }
5216 nfsmout_if(error);
5217 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5218 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5219 nfsmout_if(error);
5220 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5221 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5222 error = EBADRPC;
5223 goto nfsmout;
5224 }
5225 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5226 // XXX what if fh doesn't match the vnode we think we're re-opening?
5227 // Solaris Named Attributes may do this due to a bug... so don't warn for named attributes.
5228 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5229 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5230 }
5231 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5232 nfsmout_if(error);
5233 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5234 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5235 nfsmout:
5236 NVATTR_CLEANUP(&nvattr);
5237 nfsm_chain_cleanup(&nmreq);
5238 nfsm_chain_cleanup(&nmrep);
5239 if (!lockerror)
5240 nfs_node_unlock(np);
5241 nfs_open_owner_clear_busy(noop);
5242 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5243 if (recall) {
5244 /*
5245 * We're making a delegated claim.
5246 * Don't return the delegation here in case we have more to claim.
5247 * Just make sure it's queued up to be returned.
5248 */
5249 nfs4_delegation_return_enqueue(np);
5250 }
5251 }
5252 out:
5253 // if (!error)
5254 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5255 if (filename && (filename != &smallname[0]))
5256 FREE(filename, M_TEMP);
5257 if (vname)
5258 vnode_putname(vname);
5259 if (dvp != NULLVP)
5260 vnode_put(dvp);
5261 return (error);
5262 }
5263
5264 /*
5265 * Send an OPEN RPC to reclaim an open file.
5266 */
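/*
* This is the NFS_CLAIM_PREVIOUS case: after a server restart the
* client re-asserts (normally during the server's grace period) the
* opens it previously held, including the delegation type it held, so
* the request goes out with R_RECOVER|R_NOINTR as part of recovery.
*/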
5267 int
5268 nfs4_open_reclaim_rpc(
5269 struct nfs_open_file *nofp,
5270 int share_access,
5271 int share_deny)
5272 {
5273 struct nfsmount *nmp;
5274 struct nfs_open_owner *noop = nofp->nof_owner;
5275 struct nfs_vattr nvattr;
5276 int error = 0, lockerror = ENOENT, status;
5277 int nfsvers, numops;
5278 u_int64_t xid;
5279 nfsnode_t np = nofp->nof_np;
5280 struct nfsm_chain nmreq, nmrep;
5281 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5282 uint32_t rflags = 0, delegation, recall = 0;
5283 fhandle_t fh;
5284 struct nfs_stateid dstateid;
5285 char sbuf[64], *s = sbuf;
5286 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5287 struct kauth_ace ace;
5288 struct nfsreq_secinfo_args si;
5289
5290 nmp = NFSTONMP(np);
5291 if (nfs_mount_gone(nmp))
5292 return (ENXIO);
5293 nfsvers = nmp->nm_vers;
5294
5295 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5296 return (error);
5297
5298 NVATTR_INIT(&nvattr);
5299 delegation = NFS_OPEN_DELEGATE_NONE;
5300 dstateid = np->n_dstateid;
5301 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5302
5303 nfsm_chain_null(&nmreq);
5304 nfsm_chain_null(&nmrep);
5305
5306 // PUTFH, OPEN, GETATTR(FH)
5307 numops = 3;
5308 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5309 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5310 numops--;
5311 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5312 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5313 numops--;
5314 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5315 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5316 nfsm_chain_add_32(error, &nmreq, share_access);
5317 nfsm_chain_add_32(error, &nmreq, share_deny);
5318 // open owner: clientid + uid
5319 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5320 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5321 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5322 // openflag4
5323 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5324 // open_claim4
5325 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5326 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5327 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5328 NFS_OPEN_DELEGATE_NONE;
5329 nfsm_chain_add_32(error, &nmreq, delegation);
5330 delegation = NFS_OPEN_DELEGATE_NONE;
5331 numops--;
5332 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5333 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5334 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5335 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5336 nfsm_chain_build_done(error, &nmreq);
5337 nfsm_assert(error, (numops == 0), EPROTO);
5338 nfsmout_if(error);
5339
5340 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5341 noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status);
5342
5343 if ((lockerror = nfs_node_lock(np)))
5344 error = lockerror;
5345 nfsm_chain_skip_tag(error, &nmrep);
5346 nfsm_chain_get_32(error, &nmrep, numops);
5347 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5348 nfsmout_if(error);
5349 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5350 nfs_owner_seqid_increment(noop, NULL, error);
5351 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5352 nfsm_chain_check_change_info(error, &nmrep, np);
5353 nfsm_chain_get_32(error, &nmrep, rflags);
5354 bmlen = NFS_ATTR_BITMAP_LEN;
5355 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5356 nfsm_chain_get_32(error, &nmrep, delegation);
5357 if (!error)
5358 switch (delegation) {
5359 case NFS_OPEN_DELEGATE_NONE:
5360 if (np->n_openflags & N_DELEG_MASK) {
5361 /*
5362 * Hey! We were supposed to get our delegation back even
5363 * if it was getting immediately recalled. Bad server!
5364 *
5365 * Just try to return the existing delegation.
5366 */
5367 // NP(np, "nfs: open reclaim didn't return delegation?");
5368 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5369 recall = 1;
5370 }
5371 break;
5372 case NFS_OPEN_DELEGATE_READ:
5373 case NFS_OPEN_DELEGATE_WRITE:
5374 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5375 nfsm_chain_get_32(error, &nmrep, recall);
5376 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space_limit (limitby + limit, 3 words)
5377 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5378 /* if we have any trouble accepting the ACE, just invalidate it */
5379 ace_type = ace_flags = ace_mask = len = 0;
5380 nfsm_chain_get_32(error, &nmrep, ace_type);
5381 nfsm_chain_get_32(error, &nmrep, ace_flags);
5382 nfsm_chain_get_32(error, &nmrep, ace_mask);
5383 nfsm_chain_get_32(error, &nmrep, len);
5384 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5385 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5386 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5387 if (!error && (len >= slen)) {
5388 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5389 if (s)
5390 slen = len+1;
5391 else
5392 ace.ace_flags = 0;
5393 }
5394 if (s)
5395 nfsm_chain_get_opaque(error, &nmrep, len, s);
5396 else
5397 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5398 if (!error && s) {
5399 s[len] = '\0';
5400 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5401 ace.ace_flags = 0;
5402 }
5403 if (error || !s)
5404 ace.ace_flags = 0;
5405 if (s && (s != sbuf))
5406 FREE(s, M_TEMP);
5407 if (!error) {
5408 /* stuff the delegation state in the node */
5409 lck_mtx_lock(&np->n_openlock);
5410 np->n_openflags &= ~N_DELEG_MASK;
5411 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5412 np->n_dstateid = dstateid;
5413 np->n_dace = ace;
5414 if (np->n_dlink.tqe_next == NFSNOLIST) {
5415 lck_mtx_lock(&nmp->nm_lock);
5416 if (np->n_dlink.tqe_next == NFSNOLIST)
5417 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5418 lck_mtx_unlock(&nmp->nm_lock);
5419 }
5420 lck_mtx_unlock(&np->n_openlock);
5421 }
5422 break;
5423 default:
5424 error = EBADRPC;
5425 break;
5426 }
5427 nfsmout_if(error);
5428 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5429 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5430 nfsmout_if(error);
5431 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5432 NP(np, "nfs: open reclaim didn't return filehandle?");
5433 error = EBADRPC;
5434 goto nfsmout;
5435 }
5436 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5437 // XXX what if fh doesn't match the vnode we think we're re-opening?
5438 // That should be pretty hard in this case, given that we are doing
5439 // the open reclaim using the file handle (and not a dir/name pair).
5440 // Solaris Named Attributes may do this due to a bug... so don't warn for named attributes.
5441 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5442 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5443 }
5444 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5445 nfsmout_if(error);
5446 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5447 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5448 nfsmout:
5449 // if (!error)
5450 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5451 NVATTR_CLEANUP(&nvattr);
5452 nfsm_chain_cleanup(&nmreq);
5453 nfsm_chain_cleanup(&nmrep);
5454 if (!lockerror)
5455 nfs_node_unlock(np);
5456 nfs_open_owner_clear_busy(noop);
5457 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5458 if (recall)
5459 nfs4_delegation_return_enqueue(np);
5460 }
5461 return (error);
5462 }
5463
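/*
* Send an OPEN_DOWNGRADE RPC to reduce an open file's share access and
* deny modes to those currently recorded in the nofp (nof_access and
* nof_deny), presumably after part of a combined open has been closed.
*/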
5464 int
5465 nfs4_open_downgrade_rpc(
5466 nfsnode_t np,
5467 struct nfs_open_file *nofp,
5468 vfs_context_t ctx)
5469 {
5470 struct nfs_open_owner *noop = nofp->nof_owner;
5471 struct nfsmount *nmp;
5472 int error, lockerror = ENOENT, status, nfsvers, numops;
5473 struct nfsm_chain nmreq, nmrep;
5474 u_int64_t xid;
5475 struct nfsreq_secinfo_args si;
5476
5477 nmp = NFSTONMP(np);
5478 if (nfs_mount_gone(nmp))
5479 return (ENXIO);
5480 nfsvers = nmp->nm_vers;
5481
5482 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5483 return (error);
5484
5485 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5486 nfsm_chain_null(&nmreq);
5487 nfsm_chain_null(&nmrep);
5488
5489 // PUTFH, OPEN_DOWNGRADE, GETATTR
5490 numops = 3;
5491 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5492 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5493 numops--;
5494 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5495 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5496 numops--;
5497 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5498 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5499 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5500 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5501 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5502 numops--;
5503 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5504 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5505 nfsm_chain_build_done(error, &nmreq);
5506 nfsm_assert(error, (numops == 0), EPROTO);
5507 nfsmout_if(error);
5508 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5509 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5510 &si, R_NOINTR, &nmrep, &xid, &status);
5511
5512 if ((lockerror = nfs_node_lock(np)))
5513 error = lockerror;
5514 nfsm_chain_skip_tag(error, &nmrep);
5515 nfsm_chain_get_32(error, &nmrep, numops);
5516 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5517 nfsmout_if(error);
5518 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
5519 nfs_owner_seqid_increment(noop, NULL, error);
5520 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5521 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5522 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5523 nfsmout:
5524 if (!lockerror)
5525 nfs_node_unlock(np);
5526 nfs_open_owner_clear_busy(noop);
5527 nfsm_chain_cleanup(&nmreq);
5528 nfsm_chain_cleanup(&nmrep);
5529 return (error);
5530 }
5531
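/*
* Send a CLOSE RPC for the open file's current stateid.  The open
* owner is marked busy so its seqid correctly orders this CLOSE with
* respect to other state-changing operations by the same owner.
*/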
5532 int
5533 nfs4_close_rpc(
5534 nfsnode_t np,
5535 struct nfs_open_file *nofp,
5536 thread_t thd,
5537 kauth_cred_t cred,
5538 int flags)
5539 {
5540 struct nfs_open_owner *noop = nofp->nof_owner;
5541 struct nfsmount *nmp;
5542 int error, lockerror = ENOENT, status, nfsvers, numops;
5543 struct nfsm_chain nmreq, nmrep;
5544 u_int64_t xid;
5545 struct nfsreq_secinfo_args si;
5546
5547 nmp = NFSTONMP(np);
5548 if (nfs_mount_gone(nmp))
5549 return (ENXIO);
5550 nfsvers = nmp->nm_vers;
5551
5552 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5553 return (error);
5554
5555 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5556 nfsm_chain_null(&nmreq);
5557 nfsm_chain_null(&nmrep);
5558
5559 // PUTFH, CLOSE, GETATTR
5560 numops = 3;
5561 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5562 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
5563 numops--;
5564 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5565 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5566 numops--;
5567 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
5568 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5569 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5570 numops--;
5571 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5572 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5573 nfsm_chain_build_done(error, &nmreq);
5574 nfsm_assert(error, (numops == 0), EPROTO);
5575 nfsmout_if(error);
5576 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5577
5578 if ((lockerror = nfs_node_lock(np)))
5579 error = lockerror;
5580 nfsm_chain_skip_tag(error, &nmrep);
5581 nfsm_chain_get_32(error, &nmrep, numops);
5582 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5583 nfsmout_if(error);
5584 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
5585 nfs_owner_seqid_increment(noop, NULL, error);
5586 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5587 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5588 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5589 nfsmout:
5590 if (!lockerror)
5591 nfs_node_unlock(np);
5592 nfs_open_owner_clear_busy(noop);
5593 nfsm_chain_cleanup(&nmreq);
5594 nfsm_chain_cleanup(&nmrep);
5595 return (error);
5596 }
5597
5598
5599 /*
5600 * Claim the delegated open combinations this open file holds.
5601 */
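/*
* The nof_d_* fields count delegated opens per (access, deny)
* combination.  Claims below proceed from the most restrictive deny
* mode (deny read/write) down to deny-none; only the deny-none opens
* are allowed to fall back to a plain reopen if the delegation turns
* out to be lost and no locks are held.
*/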
5602 int
5603 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
5604 {
5605 struct nfs_open_owner *noop = nofp->nof_owner;
5606 struct nfs_lock_owner *nlop;
5607 struct nfs_file_lock *nflp, *nextnflp;
5608 struct nfsmount *nmp;
5609 int error = 0, reopen = 0;
5610
5611 if (nofp->nof_d_rw_drw) {
5612 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
5613 if (!error) {
5614 lck_mtx_lock(&nofp->nof_lock);
5615 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
5616 nofp->nof_d_rw_drw = 0;
5617 lck_mtx_unlock(&nofp->nof_lock);
5618 }
5619 }
5620 if (!error && nofp->nof_d_w_drw) {
5621 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
5622 if (!error) {
5623 lck_mtx_lock(&nofp->nof_lock);
5624 nofp->nof_w_drw += nofp->nof_d_w_drw;
5625 nofp->nof_d_w_drw = 0;
5626 lck_mtx_unlock(&nofp->nof_lock);
5627 }
5628 }
5629 if (!error && nofp->nof_d_r_drw) {
5630 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
5631 if (!error) {
5632 lck_mtx_lock(&nofp->nof_lock);
5633 nofp->nof_r_drw += nofp->nof_d_r_drw;
5634 nofp->nof_d_r_drw = 0;
5635 lck_mtx_unlock(&nofp->nof_lock);
5636 }
5637 }
5638 if (!error && nofp->nof_d_rw_dw) {
5639 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
5640 if (!error) {
5641 lck_mtx_lock(&nofp->nof_lock);
5642 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
5643 nofp->nof_d_rw_dw = 0;
5644 lck_mtx_unlock(&nofp->nof_lock);
5645 }
5646 }
5647 if (!error && nofp->nof_d_w_dw) {
5648 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
5649 if (!error) {
5650 lck_mtx_lock(&nofp->nof_lock);
5651 nofp->nof_w_dw += nofp->nof_d_w_dw;
5652 nofp->nof_d_w_dw = 0;
5653 lck_mtx_unlock(&nofp->nof_lock);
5654 }
5655 }
5656 if (!error && nofp->nof_d_r_dw) {
5657 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
5658 if (!error) {
5659 lck_mtx_lock(&nofp->nof_lock);
5660 nofp->nof_r_dw += nofp->nof_d_r_dw;
5661 nofp->nof_d_r_dw = 0;
5662 lck_mtx_unlock(&nofp->nof_lock);
5663 }
5664 }
5665 /* non-deny-mode opens may be reopened if no locks are held */
5666 if (!error && nofp->nof_d_rw) {
5667 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
5668 /* for some errors, we should just try reopening the file */
5669 if (nfs_mount_state_error_delegation_lost(error))
5670 reopen = error;
5671 if (!error || reopen) {
5672 lck_mtx_lock(&nofp->nof_lock);
5673 nofp->nof_rw += nofp->nof_d_rw;
5674 nofp->nof_d_rw = 0;
5675 lck_mtx_unlock(&nofp->nof_lock);
5676 }
5677 }
5678 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5679 if ((!error || reopen) && nofp->nof_d_w) {
5680 if (!error) {
5681 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
5682 /* for some errors, we should just try reopening the file */
5683 if (nfs_mount_state_error_delegation_lost(error))
5684 reopen = error;
5685 }
5686 if (!error || reopen) {
5687 lck_mtx_lock(&nofp->nof_lock);
5688 nofp->nof_w += nofp->nof_d_w;
5689 nofp->nof_d_w = 0;
5690 lck_mtx_unlock(&nofp->nof_lock);
5691 }
5692 }
5693 if ((!error || reopen) && nofp->nof_d_r) {
5694 if (!error) {
5695 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
5696 /* for some errors, we should just try reopening the file */
5697 if (nfs_mount_state_error_delegation_lost(error))
5698 reopen = error;
5699 }
5700 if (!error || reopen) {
5701 lck_mtx_lock(&nofp->nof_lock);
5702 nofp->nof_r += nofp->nof_d_r;
5703 nofp->nof_d_r = 0;
5704 lck_mtx_unlock(&nofp->nof_lock);
5705 }
5706 }
5707
5708 if (reopen) {
5709 /*
5710 * Any problems with the delegation probably indicate that we
5711 * should review/return all of our current delegation state.
5712 */
5713 if ((nmp = NFSTONMP(nofp->nof_np))) {
5714 nfs4_delegation_return_enqueue(nofp->nof_np);
5715 lck_mtx_lock(&nmp->nm_lock);
5716 nfs_need_recover(nmp, NFSERR_EXPIRED);
5717 lck_mtx_unlock(&nmp->nm_lock);
5718 }
5719 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
5720 /* just reopen the file on next access */
5721 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5722 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5723 lck_mtx_lock(&nofp->nof_lock);
5724 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
5725 lck_mtx_unlock(&nofp->nof_lock);
5726 return (0);
5727 }
5728 if (reopen)
5729 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5730 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5731 }
5732
5733 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
5734 /* claim delegated locks */
5735 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
5736 if (nlop->nlo_open_owner != noop)
5737 continue;
5738 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
5739 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5740 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5741 continue;
5742 /* skip non-delegated locks */
5743 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5744 continue;
5745 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
5746 if (error) {
5747 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5748 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5749 break;
5750 }
5751 // else {
5752 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5753 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5754 // }
5755 }
5756 if (error)
5757 break;
5758 }
5759 }
5760
5761 if (!error) /* all state claimed successfully! */
5762 return (0);
5763
5764 /* restart if it looks like a problem bigger than just losing the delegation */
5765 if (!nfs_mount_state_error_delegation_lost(error) &&
5766 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
5767 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5768 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np))))
5769 nfs_need_reconnect(nmp);
5770 return (error);
5771 }
5772
5773 /* delegated state lost (once held but now not claimable) */
5774 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5775
5776 /*
5777 * Any problems with the delegation probably indicate that we
5778 * should review/return all of our current delegation state.
5779 */
5780 if ((nmp = NFSTONMP(nofp->nof_np))) {
5781 nfs4_delegation_return_enqueue(nofp->nof_np);
5782 lck_mtx_lock(&nmp->nm_lock);
5783 nfs_need_recover(nmp, NFSERR_EXPIRED);
5784 lck_mtx_unlock(&nmp->nm_lock);
5785 }
5786
5787 /* revoke all open file state */
5788 nfs_revoke_open_state_for_node(nofp->nof_np);
5789
5790 return (error);
5791 }
5792
5793 /*
5794 * Release all open state for the given node.
5795 */
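/*
* If "force" is set (or the mount is gone) no unlock/close RPCs are
* sent; the local lock and open state is simply torn down and the
* opens are marked lost.
*/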
5796 void
5797 nfs_release_open_state_for_node(nfsnode_t np, int force)
5798 {
5799 struct nfsmount *nmp = NFSTONMP(np);
5800 struct nfs_open_file *nofp;
5801 struct nfs_file_lock *nflp, *nextnflp;
5802
5803 /* drop held locks */
5804 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
5805 /* skip dead & blocked lock requests */
5806 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5807 continue;
5808 /* send an unlock if not a delegated lock */
5809 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5810 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
5811 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
5812 /* kill/remove the lock */
5813 lck_mtx_lock(&np->n_openlock);
5814 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
5815 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
5816 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
5817 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
5818 if (nflp->nfl_blockcnt) {
5819 /* wake up anyone blocked on this lock */
5820 wakeup(nflp);
5821 } else {
5822 /* remove nflp from lock list and destroy */
5823 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
5824 nfs_file_lock_destroy(nflp);
5825 }
5826 lck_mtx_unlock(&np->n_openlock);
5827 }
5828
5829 lck_mtx_lock(&np->n_openlock);
5830
5831 /* drop all opens */
5832 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5833 if (nofp->nof_flags & NFS_OPEN_FILE_LOST)
5834 continue;
5835 /* mark open state as lost */
5836 lck_mtx_lock(&nofp->nof_lock);
5837 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5838 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
5839
5840 lck_mtx_unlock(&nofp->nof_lock);
5841 if (!force && nmp && (nmp->nm_vers >= NFS_VER4))
5842 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
5843 }
5844
5845 lck_mtx_unlock(&np->n_openlock);
5846 }
5847
5848 /*
5849 * State for a node has been lost, drop it, and revoke the node.
5850 * Attempt to return any state if possible in case the server
5851 * might somehow think we hold it.
5852 */
5853 void
5854 nfs_revoke_open_state_for_node(nfsnode_t np)
5855 {
5856 struct nfsmount *nmp;
5857
5858 /* mark node as needing to be revoked */
5859 nfs_node_lock_force(np);
5860 if (np->n_flag & NREVOKE) { /* already revoked? */
5862 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
5863 nfs_node_unlock(np);
5864 return;
5865 }
5866 np->n_flag |= NREVOKE;
5867 nfs_node_unlock(np);
5868
5869 nfs_release_open_state_for_node(np, 0);
5870 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
5871
5872 /* mark mount as needing a revoke scan and have the socket thread do it. */
5873 if ((nmp = NFSTONMP(np))) {
5874 lck_mtx_lock(&nmp->nm_lock);
5875 nmp->nm_state |= NFSSTA_REVOKE;
5876 nfs_mount_sock_thread_wake(nmp);
5877 lck_mtx_unlock(&nmp->nm_lock);
5878 }
5879 }
5880
5881 /*
5882 * Claim the delegated open combinations that each of this node's open files holds.
5883 */
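/*
* n_openlock is dropped around each claim RPC, so after every claim
* the walk restarts from the head of the open file list in case the
* list changed while the lock was released.
*/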
5884 int
5885 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
5886 {
5887 struct nfs_open_file *nofp;
5888 int error = 0;
5889
5890 lck_mtx_lock(&np->n_openlock);
5891
5892 /* walk the open file list looking for opens with delegated state to claim */
5893 restart:
5894 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5895 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
5896 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
5897 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r)
5898 continue;
5899 lck_mtx_unlock(&np->n_openlock);
5900 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
5901 lck_mtx_lock(&np->n_openlock);
5902 if (error)
5903 break;
5904 goto restart;
5905 }
5906
5907 lck_mtx_unlock(&np->n_openlock);
5908
5909 return (error);
5910 }
5911
5912 /*
5913 * Mark a node as needing to have its delegation returned.
5914 * Queue it up on the delegation return queue.
5915 * Make sure the thread is running.
5916 */
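/*
* Typical usage, as in the claim paths above:
*
*     if (recall)
*             nfs4_delegation_return_enqueue(np);
*
* The woken socket thread then drains nm_dreturnq and performs the
* actual delegation returns.
*/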
5917 void
5918 nfs4_delegation_return_enqueue(nfsnode_t np)
5919 {
5920 struct nfsmount *nmp;
5921
5922 nmp = NFSTONMP(np);
5923 if (nfs_mount_gone(nmp))
5924 return;
5925
5926 lck_mtx_lock(&np->n_openlock);
5927 np->n_openflags |= N_DELEG_RETURN;
5928 lck_mtx_unlock(&np->n_openlock);
5929
5930 lck_mtx_lock(&nmp->nm_lock);
5931 if (np->n_dreturn.tqe_next == NFSNOLIST)
5932 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
5933 nfs_mount_sock_thread_wake(nmp);
5934 lck_mtx_unlock(&nmp->nm_lock);
5935 }
5936
5937 /*
5938 * return any delegation we may have for the given node
5939 */
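/*
* The return happens in three steps: mark the node as returning, claim
* any state held under the delegation (turning local opens/locks into
* real server state), then send DELEGRETURN with the delegation
* stateid.
*/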
5940 int
5941 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
5942 {
5943 struct nfsmount *nmp;
5944 fhandle_t fh;
5945 nfs_stateid dstateid;
5946 int error;
5947
5948 nmp = NFSTONMP(np);
5949 if (nfs_mount_gone(nmp))
5950 return (ENXIO);
5951
5952 /* first, make sure the node's marked for delegation return */
5953 lck_mtx_lock(&np->n_openlock);
5954 np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING);
5955 lck_mtx_unlock(&np->n_openlock);
5956
5957 /* make sure nobody else is using the delegation state */
5958 if ((error = nfs_open_state_set_busy(np, NULL)))
5959 goto out;
5960
5961 /* claim any delegated state */
5962 if ((error = nfs4_claim_delegated_state_for_node(np, flags)))
5963 goto out;
5964
5965 /* return the delegation */
5966 lck_mtx_lock(&np->n_openlock);
5967 dstateid = np->n_dstateid;
5968 fh.fh_len = np->n_fhsize;
5969 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
5970 lck_mtx_unlock(&np->n_openlock);
5971 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
5972 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5973 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
5974 lck_mtx_lock(&np->n_openlock);
5975 np->n_openflags &= ~N_DELEG_MASK;
5976 lck_mtx_lock(&nmp->nm_lock);
5977 if (np->n_dlink.tqe_next != NFSNOLIST) {
5978 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
5979 np->n_dlink.tqe_next = NFSNOLIST;
5980 }
5981 lck_mtx_unlock(&nmp->nm_lock);
5982 lck_mtx_unlock(&np->n_openlock);
5983 }
5984
5985 out:
5986 /* make sure it's no longer on the return queue and clear the return flags */
5987 lck_mtx_lock(&nmp->nm_lock);
5988 if (np->n_dreturn.tqe_next != NFSNOLIST) {
5989 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
5990 np->n_dreturn.tqe_next = NFSNOLIST;
5991 }
5992 lck_mtx_unlock(&nmp->nm_lock);
5993 lck_mtx_lock(&np->n_openlock);
5994 np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING);
5995 lck_mtx_unlock(&np->n_openlock);
5996
5997 if (error) {
5998 NP(np, "nfs4_delegation_return, error %d", error);
5999 if (error == ETIMEDOUT)
6000 nfs_need_reconnect(nmp);
6001 if (nfs_mount_state_error_should_restart(error)) {
6002 /* make sure recovery happens */
6003 lck_mtx_lock(&nmp->nm_lock);
6004 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6005 lck_mtx_unlock(&nmp->nm_lock);
6006 }
6007 }
6008
6009 nfs_open_state_clear_busy(np);
6010
6011 return (error);
6012 }
6013
6014 /*
6015 * RPC to return a delegation for a file handle
6016 */
6017 int
6018 nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6019 {
6020 int error = 0, status, numops;
6021 uint64_t xid;
6022 struct nfsm_chain nmreq, nmrep;
6023 struct nfsreq_secinfo_args si;
6024
6025 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6026 nfsm_chain_null(&nmreq);
6027 nfsm_chain_null(&nmrep);
6028
6029 // PUTFH, DELEGRETURN
6030 numops = 2;
6031 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6032 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6033 numops--;
6034 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6035 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6036 numops--;
6037 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6038 nfsm_chain_add_stateid(error, &nmreq, sid);
6039 nfsm_chain_build_done(error, &nmreq);
6040 nfsm_assert(error, (numops == 0), EPROTO);
6041 nfsmout_if(error);
6042 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6043 nfsm_chain_skip_tag(error, &nmrep);
6044 nfsm_chain_get_32(error, &nmrep, numops);
6045 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6046 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6047 nfsmout:
6048 nfsm_chain_cleanup(&nmreq);
6049 nfsm_chain_cleanup(&nmrep);
6050 return (error);
6051 }
6052
6053
6054 /*
6055 * NFS read call.
6056 * Just call nfs_bioread() to do the work.
6057 *
6058 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6059 * without first calling VNOP_OPEN, so we make sure the file is open here.
6060 */
6061 int
6062 nfs_vnop_read(
6063 struct vnop_read_args /* {
6064 struct vnodeop_desc *a_desc;
6065 vnode_t a_vp;
6066 struct uio *a_uio;
6067 int a_ioflag;
6068 vfs_context_t a_context;
6069 } */ *ap)
6070 {
6071 vnode_t vp = ap->a_vp;
6072 vfs_context_t ctx = ap->a_context;
6073 nfsnode_t np;
6074 struct nfsmount *nmp;
6075 struct nfs_open_owner *noop;
6076 struct nfs_open_file *nofp;
6077 int error;
6078
6079 if (vnode_vtype(ap->a_vp) != VREG)
6080 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6081
6082 np = VTONFS(vp);
6083 nmp = NFSTONMP(np);
6084 if (nfs_mount_gone(nmp))
6085 return (ENXIO);
6086 if (np->n_flag & NREVOKE)
6087 return (EIO);
6088
6089 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6090 if (!noop)
6091 return (ENOMEM);
6092 restart:
6093 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6094 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6095 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6096 error = EIO;
6097 }
6098 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6099 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6100 nofp = NULL;
6101 if (!error)
6102 goto restart;
6103 }
6104 if (error) {
6105 nfs_open_owner_rele(noop);
6106 return (error);
6107 }
6108 /*
6109 * Since the read path is a hot path, if we already have
6110 * read access, let's go and try to do the read, without
6111 * busying the mount and open file node for this open owner.
6112 *
6113 * N.B. This is inherently racy w.r.t. an execve using
6114 * an already open file, in that the read at the end of
6115 * this routine will be racing with a potential close.
6116 * The code below ultimately has the same problem. In practice
6117 * this does not seem to be an issue.
6118 */
6119 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6120 nfs_open_owner_rele(noop);
6121 goto do_read;
6122 }
6123 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6124 if (error) {
6125 nfs_open_owner_rele(noop);
6126 return (error);
6127 }
6128 /*
6129 * If we don't have a file already open with the access we need (read), then
6130 * we need to open one. Otherwise we just co-opt an open. We might not already
6131 * have access because we're trying to read the first page of the
6132 * file for execve.
6133 */
6134 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6135 if (error) {
6136 nfs_mount_state_in_use_end(nmp, 0);
6137 nfs_open_owner_rele(noop);
6138 return (error);
6139 }
6140 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6141 /* we don't have the file open, so open it for read access if we're not denied */
6142 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6143 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6144 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6145 }
6146 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6147 nfs_open_file_clear_busy(nofp);
6148 nfs_mount_state_in_use_end(nmp, 0);
6149 nfs_open_owner_rele(noop);
6150 return (EPERM);
6151 }
6152 if (np->n_flag & NREVOKE) {
6153 error = EIO;
6154 nfs_open_file_clear_busy(nofp);
6155 nfs_mount_state_in_use_end(nmp, 0);
6156 nfs_open_owner_rele(noop);
6157 return (error);
6158 }
6159 if (nmp->nm_vers < NFS_VER4) {
6160 /* NFS v2/v3 opens are always allowed - so just add it. */
6161 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6162 } else {
6163 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6164 }
6165 if (!error)
6166 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6167 }
6168 if (nofp)
6169 nfs_open_file_clear_busy(nofp);
6170 if (nfs_mount_state_in_use_end(nmp, error)) {
6171 nofp = NULL;
6172 goto restart;
6173 }
6174 nfs_open_owner_rele(noop);
6175 if (error)
6176 return (error);
6177 do_read:
6178 return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
6179 }
6180
6181 /*
6182 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6183 * Files are created using the NFSv4 OPEN RPC. So we must open the
6184 * file to create it and then close it.
6185 */
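/*
* In outline: a provisional open file is opened read/write with
* NFS_OPEN_CREATE via nfs4_open_rpc(), merged into any existing open
* file for the node, and flagged NFS_OPEN_FILE_CREATE so a later close
* can balance the open that the create performed.
*/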
6186 int
6187 nfs4_vnop_create(
6188 struct vnop_create_args /* {
6189 struct vnodeop_desc *a_desc;
6190 vnode_t a_dvp;
6191 vnode_t *a_vpp;
6192 struct componentname *a_cnp;
6193 struct vnode_attr *a_vap;
6194 vfs_context_t a_context;
6195 } */ *ap)
6196 {
6197 vfs_context_t ctx = ap->a_context;
6198 struct componentname *cnp = ap->a_cnp;
6199 struct vnode_attr *vap = ap->a_vap;
6200 vnode_t dvp = ap->a_dvp;
6201 vnode_t *vpp = ap->a_vpp;
6202 struct nfsmount *nmp;
6203 nfsnode_t np;
6204 int error = 0, busyerror = 0, accessMode, denyMode;
6205 struct nfs_open_owner *noop = NULL;
6206 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6207
6208 nmp = VTONMP(dvp);
6209 if (nfs_mount_gone(nmp))
6210 return (ENXIO);
6211
6212 if (vap)
6213 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6214
6215 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6216 if (!noop)
6217 return (ENOMEM);
6218
6219 restart:
6220 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6221 if (error) {
6222 nfs_open_owner_rele(noop);
6223 return (error);
6224 }
6225
6226 /* grab a provisional, nodeless open file */
6227 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6228 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6229 printf("nfs_vnop_create: LOST\n");
6230 error = EIO;
6231 }
6232 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6233 /* This shouldn't happen given that this is a new, nodeless nofp */
6234 nfs_mount_state_in_use_end(nmp, 0);
6235 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6236 nfs_open_file_destroy(newnofp);
6237 newnofp = NULL;
6238 if (!error)
6239 goto restart;
6240 }
6241 if (!error)
6242 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6243 if (error) {
6244 if (newnofp)
6245 nfs_open_file_destroy(newnofp);
6246 newnofp = NULL;
6247 goto out;
6248 }
6249
6250 /*
6251 * We're just trying to create the file.
6252 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6253 */
6254 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6255 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6256
6257 /* Do the open/create */
6258 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6259 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6260 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6261 /*
6262 * Hmm... it looks like the request may have been retransmitted: we never
6263 * saw the first response, which successfully created/opened the file, and
6264 * the retry was then denied because the mode the file was created with
6265 * doesn't allow write access.
6266 *
6267 * We'll try to work around this by temporarily updating the mode and
6268 * retrying the open.
6269 */
6270 struct vnode_attr vattr;
6271
6272 /* first make sure it's there */
6273 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6274 if (!error2 && np) {
6275 nfs_node_unlock(np);
6276 *vpp = NFSTOV(np);
6277 if (vnode_vtype(NFSTOV(np)) == VREG) {
6278 VATTR_INIT(&vattr);
6279 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6280 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6281 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6282 VATTR_INIT(&vattr);
6283 VATTR_SET(&vattr, va_mode, vap->va_mode);
6284 nfs4_setattr_rpc(np, &vattr, ctx);
6285 if (!error2)
6286 error = 0;
6287 }
6288 }
6289 if (error) {
6290 vnode_put(*vpp);
6291 *vpp = NULL;
6292 }
6293 }
6294 }
6295 if (!error && !*vpp) {
6296 printf("nfs4_open_rpc returned without a node?\n");
6297 /* Hmmm... with no node, we have no filehandle and can't close it */
6298 error = EIO;
6299 }
6300 if (error) {
6301 /* need to cleanup our temporary nofp */
6302 nfs_open_file_clear_busy(newnofp);
6303 nfs_open_file_destroy(newnofp);
6304 newnofp = NULL;
6305 goto out;
6306 }
6307 /* After we have a node, add our open file struct to the node */
6308 np = VTONFS(*vpp);
6309 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6310 nofp = newnofp;
6311 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6312 if (error) {
6313 /* This shouldn't happen, because we passed in a new nofp to use. */
6314 printf("nfs_open_file_find_internal failed! %d\n", error);
6315 goto out;
6316 } else if (nofp != newnofp) {
6317 /*
6318 * Hmm... an open file struct already exists.
6319 * Mark the existing one busy and merge our open into it.
6320 * Then destroy the one we created.
6321 * Note: there's no chance of an open conflict because the
6322 * open has already been granted.
6323 */
6324 busyerror = nfs_open_file_set_busy(nofp, NULL);
6325 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6326 nofp->nof_stateid = newnofp->nof_stateid;
6327 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
6328 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6329 nfs_open_file_clear_busy(newnofp);
6330 nfs_open_file_destroy(newnofp);
6331 }
6332 newnofp = NULL;
6333 /* mark the node as holding a create-initiated open */
6334 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6335 nofp->nof_creator = current_thread();
6336 out:
6337 if (nofp && !busyerror)
6338 nfs_open_file_clear_busy(nofp);
6339 if (nfs_mount_state_in_use_end(nmp, error)) {
6340 nofp = newnofp = NULL;
6341 busyerror = 0;
6342 goto restart;
6343 }
6344 if (noop)
6345 nfs_open_owner_rele(noop);
6346 return (error);
6347 }
6348
6349 /*
6350 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6351 */
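/*
* The compound is PUTFH(dir), SAVEFH, CREATE, GETATTR(FH), RESTOREFH,
* GETATTR: SAVEFH/RESTOREFH bracket the create so the final GETATTR
* refetches the directory's post-operation attributes, while the first
* GETATTR (with NFS_FATTR_FILEHANDLE set) fetches the new object's
* attributes and filehandle for nfs_nget().
*/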
6352 int
6353 nfs4_create_rpc(
6354 vfs_context_t ctx,
6355 nfsnode_t dnp,
6356 struct componentname *cnp,
6357 struct vnode_attr *vap,
6358 int type,
6359 char *link,
6360 nfsnode_t *npp)
6361 {
6362 struct nfsmount *nmp;
6363 struct nfs_vattr nvattr;
6364 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6365 int nfsvers, namedattrs, numops;
6366 u_int64_t xid, savedxid = 0;
6367 nfsnode_t np = NULL;
6368 vnode_t newvp = NULL;
6369 struct nfsm_chain nmreq, nmrep;
6370 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6371 const char *tag;
6372 nfs_specdata sd;
6373 fhandle_t fh;
6374 struct nfsreq rq, *req = &rq;
6375 struct nfs_dulookup dul;
6376 struct nfsreq_secinfo_args si;
6377
6378 nmp = NFSTONMP(dnp);
6379 if (nfs_mount_gone(nmp))
6380 return (ENXIO);
6381 nfsvers = nmp->nm_vers;
6382 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6383 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6384 return (EINVAL);
6385
6386 sd.specdata1 = sd.specdata2 = 0;
6387
6388 switch (type) {
6389 case NFLNK:
6390 tag = "symlink";
6391 break;
6392 case NFBLK:
6393 case NFCHR:
6394 tag = "mknod";
6395 if (!VATTR_IS_ACTIVE(vap, va_rdev))
6396 return (EINVAL);
6397 sd.specdata1 = major(vap->va_rdev);
6398 sd.specdata2 = minor(vap->va_rdev);
6399 break;
6400 case NFSOCK:
6401 case NFFIFO:
6402 tag = "mknod";
6403 break;
6404 case NFDIR:
6405 tag = "mkdir";
6406 break;
6407 default:
6408 return (EINVAL);
6409 }
6410
6411 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6412
6413 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6414 if (!namedattrs)
6415 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6416
6417 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6418 NVATTR_INIT(&nvattr);
6419 nfsm_chain_null(&nmreq);
6420 nfsm_chain_null(&nmrep);
6421
6422 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6423 numops = 6;
6424 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6425 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6426 numops--;
6427 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6428 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6429 numops--;
6430 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6431 numops--;
6432 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6433 nfsm_chain_add_32(error, &nmreq, type);
6434 if (type == NFLNK) {
6435 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6436 } else if ((type == NFBLK) || (type == NFCHR)) {
6437 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6438 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6439 }
6440 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6441 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6442 numops--;
6443 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6444 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6445 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6446 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6447 numops--;
6448 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6449 numops--;
6450 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6451 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6452 nfsm_chain_build_done(error, &nmreq);
6453 nfsm_assert(error, (numops == 0), EPROTO);
6454 nfsmout_if(error);
6455
6456 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6457 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6458 if (!error) {
6459 if (!namedattrs)
6460 nfs_dulookup_start(&dul, dnp, ctx);
6461 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6462 }
6463
6464 if ((lockerror = nfs_node_lock(dnp)))
6465 error = lockerror;
6466 nfsm_chain_skip_tag(error, &nmrep);
6467 nfsm_chain_get_32(error, &nmrep, numops);
6468 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6469 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6470 nfsmout_if(error);
6471 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6472 nfsm_chain_check_change_info(error, &nmrep, dnp);
6473 bmlen = NFS_ATTR_BITMAP_LEN;
6474 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6475 /* At this point if we have no error, the object was created. */
6476 /* if we don't get attributes, then we should look the object up (nfs_lookitup). */
6477 create_error = error;
6478 nfsmout_if(error);
6479 nfs_vattr_set_supported(bitmap, vap);
6480 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6481 nfsmout_if(error);
6482 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6483 nfsmout_if(error);
6484 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6485 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6486 error = EBADRPC;
6487 goto nfsmout;
6488 }
6489 /* directory attributes: if we don't get them, make sure to invalidate */
6490 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6491 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6492 savedxid = xid;
6493 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
6494 if (error)
6495 NATTRINVALIDATE(dnp);
6496
6497 nfsmout:
6498 nfsm_chain_cleanup(&nmreq);
6499 nfsm_chain_cleanup(&nmrep);
6500
6501 if (!lockerror) {
6502 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
6503 dnp->n_flag &= ~NNEGNCENTRIES;
6504 cache_purge_negatives(NFSTOV(dnp));
6505 }
6506 dnp->n_flag |= NMODIFIED;
6507 nfs_node_unlock(dnp);
6508 /* nfs_getattr() will check changed and purge caches */
6509 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6510 }
6511
6512 if (!error && fh.fh_len) {
6513 /* create the vnode with the filehandle and attributes */
6514 xid = savedxid;
6515 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6516 if (!error)
6517 newvp = NFSTOV(np);
6518 }
6519 NVATTR_CLEANUP(&nvattr);
6520
6521 if (!namedattrs)
6522 nfs_dulookup_finish(&dul, dnp, ctx);
6523
6524 /*
6525 * Kludge: map EEXIST => 0 on the assumption that it's the reply to a
6526 * retransmitted request, provided we can succeed in looking up the object.
6527 */
6528 if ((create_error == EEXIST) || (!create_error && !newvp)) {
6529 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6530 if (!error) {
6531 newvp = NFSTOV(np);
6532 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers))
6533 error = EEXIST;
6534 }
6535 }
6536 if (!busyerror)
6537 nfs_node_clear_busy(dnp);
6538 if (error) {
6539 if (newvp) {
6540 nfs_node_unlock(np);
6541 vnode_put(newvp);
6542 }
6543 } else {
6544 nfs_node_unlock(np);
6545 *npp = np;
6546 }
6547 return (error);
6548 }
6549
6550 int
6551 nfs4_vnop_mknod(
6552 struct vnop_mknod_args /* {
6553 struct vnodeop_desc *a_desc;
6554 vnode_t a_dvp;
6555 vnode_t *a_vpp;
6556 struct componentname *a_cnp;
6557 struct vnode_attr *a_vap;
6558 vfs_context_t a_context;
6559 } */ *ap)
6560 {
6561 nfsnode_t np = NULL;
6562 struct nfsmount *nmp;
6563 int error;
6564
6565 nmp = VTONMP(ap->a_dvp);
6566 if (nfs_mount_gone(nmp))
6567 return (ENXIO);
6568
6569 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
6570 return (EINVAL);
6571 switch (ap->a_vap->va_type) {
6572 case VBLK:
6573 case VCHR:
6574 case VFIFO:
6575 case VSOCK:
6576 break;
6577 default:
6578 return (ENOTSUP);
6579 }
6580
6581 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6582 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
6583 if (!error)
6584 *ap->a_vpp = NFSTOV(np);
6585 return (error);
6586 }
6587
6588 int
6589 nfs4_vnop_mkdir(
6590 struct vnop_mkdir_args /* {
6591 struct vnodeop_desc *a_desc;
6592 vnode_t a_dvp;
6593 vnode_t *a_vpp;
6594 struct componentname *a_cnp;
6595 struct vnode_attr *a_vap;
6596 vfs_context_t a_context;
6597 } */ *ap)
6598 {
6599 nfsnode_t np = NULL;
6600 int error;
6601
6602 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6603 NFDIR, NULL, &np);
6604 if (!error)
6605 *ap->a_vpp = NFSTOV(np);
6606 return (error);
6607 }
6608
6609 int
6610 nfs4_vnop_symlink(
6611 struct vnop_symlink_args /* {
6612 struct vnodeop_desc *a_desc;
6613 vnode_t a_dvp;
6614 vnode_t *a_vpp;
6615 struct componentname *a_cnp;
6616 struct vnode_attr *a_vap;
6617 char *a_target;
6618 vfs_context_t a_context;
6619 } */ *ap)
6620 {
6621 nfsnode_t np = NULL;
6622 int error;
6623
6624 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6625 NFLNK, ap->a_target, &np);
6626 if (!error)
6627 *ap->a_vpp = NFSTOV(np);
6628 return (error);
6629 }
6630
6631 int
6632 nfs4_vnop_link(
6633 struct vnop_link_args /* {
6634 struct vnodeop_desc *a_desc;
6635 vnode_t a_vp;
6636 vnode_t a_tdvp;
6637 struct componentname *a_cnp;
6638 vfs_context_t a_context;
6639 } */ *ap)
6640 {
6641 vfs_context_t ctx = ap->a_context;
6642 vnode_t vp = ap->a_vp;
6643 vnode_t tdvp = ap->a_tdvp;
6644 struct componentname *cnp = ap->a_cnp;
6645 int error = 0, lockerror = ENOENT, status;
6646 struct nfsmount *nmp;
6647 nfsnode_t np = VTONFS(vp);
6648 nfsnode_t tdnp = VTONFS(tdvp);
6649 int nfsvers, numops;
6650 u_int64_t xid, savedxid;
6651 struct nfsm_chain nmreq, nmrep;
6652 struct nfsreq_secinfo_args si;
6653
6654 if (vnode_mount(vp) != vnode_mount(tdvp))
6655 return (EXDEV);
6656
6657 nmp = VTONMP(vp);
6658 if (nfs_mount_gone(nmp))
6659 return (ENXIO);
6660 nfsvers = nmp->nm_vers;
6661 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6662 return (EINVAL);
6663 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6664 return (EINVAL);
6665
6666 /*
6667 * Push all writes to the server, so that the attribute cache
6668 * doesn't get "out of sync" with the server.
6669 * XXX There should be a better way!
6670 */
6671 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
6672
6673 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
6674 return (error);
6675
6676 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6677 nfsm_chain_null(&nmreq);
6678 nfsm_chain_null(&nmrep);
6679
6680 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
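/*
 * SAVEFH stashes the source file's filehandle so RESTOREFH can make it
 * current again after the LINK; a single compound can thus fetch fresh
 * post-op attributes for both the target directory and the linked file.
 */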
6681 numops = 7;
6682 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
6683 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6684 numops--;
6685 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6686 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6687 numops--;
6688 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6689 numops--;
6690 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6691 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
6692 numops--;
6693 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
6694 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6695 numops--;
6696 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6697 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
6698 numops--;
6699 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6700 numops--;
6701 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6702 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6703 nfsm_chain_build_done(error, &nmreq);
6704 nfsm_assert(error, (numops == 0), EPROTO);
6705 nfsmout_if(error);
6706 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
6707
6708 if ((lockerror = nfs_node_lock2(tdnp, np))) {
6709 error = lockerror;
6710 goto nfsmout;
6711 }
6712 nfsm_chain_skip_tag(error, &nmrep);
6713 nfsm_chain_get_32(error, &nmrep, numops);
6714 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6715 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6716 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6717 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
6718 nfsm_chain_check_change_info(error, &nmrep, tdnp);
6719 /* directory attributes: if we don't get them, make sure to invalidate */
6720 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6721 savedxid = xid;
6722 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
6723 if (error)
6724 NATTRINVALIDATE(tdnp);
6725 /* link attributes: if we don't get them, make sure to invalidate */
6726 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6727 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6728 xid = savedxid;
6729 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6730 if (error)
6731 NATTRINVALIDATE(np);
6732 nfsmout:
6733 nfsm_chain_cleanup(&nmreq);
6734 nfsm_chain_cleanup(&nmrep);
6735 if (!lockerror)
6736 tdnp->n_flag |= NMODIFIED;
6737 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
6738 if (error == EEXIST)
6739 error = 0;
6740 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
6741 tdnp->n_flag &= ~NNEGNCENTRIES;
6742 cache_purge_negatives(tdvp);
6743 }
6744 if (!lockerror)
6745 nfs_node_unlock2(tdnp, np);
6746 nfs_node_clear_busy2(tdnp, np);
6747 return (error);
6748 }
6749
6750 int
6751 nfs4_vnop_rmdir(
6752 struct vnop_rmdir_args /* {
6753 struct vnodeop_desc *a_desc;
6754 vnode_t a_dvp;
6755 vnode_t a_vp;
6756 struct componentname *a_cnp;
6757 vfs_context_t a_context;
6758 } */ *ap)
6759 {
6760 vfs_context_t ctx = ap->a_context;
6761 vnode_t vp = ap->a_vp;
6762 vnode_t dvp = ap->a_dvp;
6763 struct componentname *cnp = ap->a_cnp;
6764 struct nfsmount *nmp;
6765 int error = 0, namedattrs;
6766 nfsnode_t np = VTONFS(vp);
6767 nfsnode_t dnp = VTONFS(dvp);
6768 struct nfs_dulookup dul;
6769
6770 if (vnode_vtype(vp) != VDIR)
6771 return (EINVAL);
6772
6773 nmp = NFSTONMP(dnp);
6774 if (nfs_mount_gone(nmp))
6775 return (ENXIO);
6776 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6777
6778 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
6779 return (error);
6780
6781 if (!namedattrs) {
6782 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6783 nfs_dulookup_start(&dul, dnp, ctx);
6784 }
6785
6786 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6787 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6788
6789 nfs_name_cache_purge(dnp, np, cnp, ctx);
6790 /* nfs_getattr() will check changed and purge caches */
6791 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6792 if (!namedattrs)
6793 nfs_dulookup_finish(&dul, dnp, ctx);
6794 nfs_node_clear_busy2(dnp, np);
6795
6796 /*
6797 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
6798 */
6799 if (error == ENOENT)
6800 error = 0;
6801 if (!error) {
6802 /*
6803 * remove nfsnode from hash now so we can't accidentally find it
6804 * again if another object gets created with the same filehandle
6805 * before this vnode gets reclaimed
6806 */
6807 lck_mtx_lock(nfs_node_hash_mutex);
6808 if (np->n_hflag & NHHASHED) {
6809 LIST_REMOVE(np, n_hash);
6810 np->n_hflag &= ~NHHASHED;
6811 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
6812 }
6813 lck_mtx_unlock(nfs_node_hash_mutex);
6814 }
6815 return (error);
6816 }
6817
6818 /*
6819 * NFSv4 Named Attributes
6820 *
6821 * Both the extended attributes interface and the named streams interface
6822 * are backed by NFSv4 named attributes. The implementations for both use
6823 * a common set of routines in an attempt to reduce code duplication, to
6824 * increase efficiency, to increase caching of both names and data, and to
6825 * confine the complexity.
6826 *
6827 * Each NFS node caches its named attribute directory's file handle.
6828 * The directory nodes for the named attribute directories are handled
6829 * exactly like regular directories (with a couple of minor exceptions).
6830 * Named attribute nodes are also treated as much like regular files as
6831 * possible.
6832 *
6833 * Most of the heavy lifting is done by nfs4_named_attr_get().
6834 */
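/*
 * Rough map of the layering (an illustrative sketch, not exhaustive):
 *
 * VNOP_GETXATTR -> nfs4_named_attr_get(..., SHARE_ACCESS_NONE [, PREFETCH])
 * VNOP_SETXATTR -> nfs4_named_attr_get(..., SHARE_ACCESS_BOTH, CREATE...)
 * VNOP_REMOVEXATTR -> nfs4_named_attr_remove()
 * VNOP_LISTXATTR -> nfs4_named_attr_dir_get() + directory buffer reads
 * VNOP_GETNAMEDSTREAM / VNOP_MAKENAMEDSTREAM -> nfs4_named_attr_get()
 */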
6835
6836 /*
6837 * Get the given node's attribute directory node.
6838 * If !fetch, then only return a cached node.
6839 * Otherwise, we will attempt to fetch the node from the server.
6840 * (Note: the node should be marked busy.)
6841 */
6842 nfsnode_t
6843 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
6844 {
6845 nfsnode_t adnp = NULL;
6846 struct nfsmount *nmp;
6847 int error = 0, status, numops;
6848 struct nfsm_chain nmreq, nmrep;
6849 u_int64_t xid;
6850 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
6851 fhandle_t fh;
6852 struct nfs_vattr nvattr;
6853 struct componentname cn;
6854 struct nfsreq rq, *req = &rq;
6855 struct nfsreq_secinfo_args si;
6856
6857 nmp = NFSTONMP(np);
6858 if (nfs_mount_gone(nmp))
6859 return (NULL);
6860 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6861 return (NULL);
6862
6863 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6864 NVATTR_INIT(&nvattr);
6865 nfsm_chain_null(&nmreq);
6866 nfsm_chain_null(&nmrep);
6867
6868 bzero(&cn, sizeof(cn));
6869 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
6870 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
6871 cn.cn_nameiop = LOOKUP;
6872
6873 if (np->n_attrdirfh) {
6874 // XXX can't set parent correctly (to np) yet
6875 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh,
6876 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
6877 if (adnp)
6878 goto nfsmout;
6879 }
6880 if (!fetch) {
6881 error = ENOENT;
6882 goto nfsmout;
6883 }
6884
6885 // PUTFH, OPENATTR, GETATTR
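/*
 * OPENATTR (createdir == 0 here) makes the named attribute directory the
 * current filehandle, so the following GETATTR returns the attrdir's
 * attributes, including its filehandle.
 */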
6886 numops = 3;
6887 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
6888 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6889 numops--;
6890 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6891 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
6892 numops--;
6893 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
6894 nfsm_chain_add_32(error, &nmreq, 0);
6895 numops--;
6896 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6897 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6898 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6899 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
6900 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6901 nfsm_chain_build_done(error, &nmreq);
6902 nfsm_assert(error, (numops == 0), EPROTO);
6903 nfsmout_if(error);
6904 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6905 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6906 if (!error)
6907 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6908
6909 nfsm_chain_skip_tag(error, &nmrep);
6910 nfsm_chain_get_32(error, &nmrep, numops);
6911 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6912 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
6913 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6914 nfsmout_if(error);
6915 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6916 nfsmout_if(error);
6917 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
6918 error = ENOENT;
6919 goto nfsmout;
6920 }
6921 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
6922 /* (re)allocate attrdir fh buffer */
6923 if (np->n_attrdirfh)
6924 FREE(np->n_attrdirfh, M_TEMP);
6925 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
6926 }
6927 if (!np->n_attrdirfh) {
6928 error = ENOMEM;
6929 goto nfsmout;
6930 }
6931 /* cache the attrdir fh in the node */
6932 *np->n_attrdirfh = fh.fh_len;
6933 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
6934 /* create node for attrdir */
6935 // XXX can't set parent correctly (to np) yet
6936 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
6937 nfsmout:
6938 NVATTR_CLEANUP(&nvattr);
6939 nfsm_chain_cleanup(&nmreq);
6940 nfsm_chain_cleanup(&nmrep);
6941
6942 if (adnp) {
6943 /* sanity check that this node is an attribute directory */
6944 if (adnp->n_vattr.nva_type != VDIR)
6945 error = EINVAL;
6946 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
6947 error = EINVAL;
6948 nfs_node_unlock(adnp);
6949 if (error)
6950 vnode_put(NFSTOV(adnp));
6951 }
6952 return (error ? NULL : adnp);
6953 }
6954
6955 /*
6956 * Get the given node's named attribute node for the name given.
6957 *
6958 * In an effort to increase the performance of named attribute access, we try
6959 * to reduce server requests by doing the following:
6960 *
6961 * - cache the node's named attribute directory file handle in the node
6962 * - maintain a directory vnode for the attribute directory
6963 * - use name cache entries (positive and negative) to speed up lookups
6964 * - optionally open the named attribute (with the given accessMode) in the same RPC
6965 * - combine attribute directory retrieval with the lookup/open RPC
6966 * - optionally prefetch the named attribute's first block of data in the same RPC
6967 *
6968 * Also, in an attempt to reduce the number of copies/variations of this code,
6969 * parts of the RPC building/processing code are conditionalized on what is
6970 * needed for any particular request (openattr, lookup vs. open, read).
6971 *
6972 * Note that because we may not have the attribute directory node when we start
6973 * the lookup/open, we lock both the node and the attribute directory node.
6974 */
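/*
 * Illustrative sketch: a first getxattr (no cached attrdir) with prefetch
 * enabled sends a single compound roughly like:
 *
 * PUTFH(np) OPENATTR GETATTR(+FH) LOOKUP(name) GETATTR(+FH) SAVEFH
 * PUTFH(np) OPENATTR GETATTR RESTOREFH NVERIFY(size == 0) READ(0, rlen)
 *
 * The NVERIFY fails (aborting the READ) exactly when the attribute is
 * empty, so we only prefetch data that actually exists.
 */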
6975
6976 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6977 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6978 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6979 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
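/*
 * Example (mirrors nfs4_vnop_setxattr() handling XATTR_CREATE): a guarded,
 * truncating create/open of a named attribute:
 *
 * flags = NFS_GET_NAMED_ATTR_CREATE |
 *         NFS_GET_NAMED_ATTR_CREATE_GUARDED |
 *         NFS_GET_NAMED_ATTR_TRUNCATE;
 * error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
 *         flags, ctx, &anp, &nofp);
 */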
6980
6981 int
6982 nfs4_named_attr_get(
6983 nfsnode_t np,
6984 struct componentname *cnp,
6985 uint32_t accessMode,
6986 int flags,
6987 vfs_context_t ctx,
6988 nfsnode_t *anpp,
6989 struct nfs_open_file **nofpp)
6990 {
6991 struct nfsmount *nmp;
6992 int error = 0, open_error = EIO;
6993 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
6994 int create, guarded, prefetch, truncate, noopbusy = 0;
6995 int open, status, numops, hadattrdir, negnamecache;
6996 struct nfs_vattr nvattr;
6997 struct vnode_attr vattr;
6998 nfsnode_t adnp = NULL, anp = NULL;
6999 vnode_t avp = NULL;
7000 u_int64_t xid, savedxid = 0;
7001 struct nfsm_chain nmreq, nmrep;
7002 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7003 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7004 nfs_stateid stateid, dstateid;
7005 fhandle_t fh;
7006 struct nfs_open_owner *noop = NULL;
7007 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7008 struct vnop_access_args naa;
7009 thread_t thd;
7010 kauth_cred_t cred;
7011 struct timeval now;
7012 char sbuf[64], *s;
7013 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7014 struct kauth_ace ace;
7015 struct nfsreq rq, *req = &rq;
7016 struct nfsreq_secinfo_args si;
7017
7018 *anpp = NULL;
7019 fh.fh_len = 0;
7020 rflags = delegation = recall = eof = rlen = retlen = 0;
7021 ace.ace_flags = 0;
7022 s = sbuf;
7023 slen = sizeof(sbuf);
7024
7025 nmp = NFSTONMP(np);
7026 if (nfs_mount_gone(nmp))
7027 return (ENXIO);
7028 NVATTR_INIT(&nvattr);
7029 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7030 thd = vfs_context_thread(ctx);
7031 cred = vfs_context_ucred(ctx);
7032 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7033 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7034 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7035 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7036
7037 if (!create) {
7038 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7039 if (error)
7040 return (error);
7041 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7042 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7043 return (ENOATTR);
7044 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7045 /* shouldn't happen... but just be safe */
7046 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7047 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7048 }
7049 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7050 if (open) {
7051 /*
7052 * We're trying to open the file.
7053 * We'll create/open it with the given access mode,
7054 * and set NFS_OPEN_FILE_CREATE.
7055 */
7056 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7057 if (prefetch && guarded)
7058 prefetch = 0; /* no sense prefetching data that can't be there */
7059
7060 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7061 if (!noop)
7062 return (ENOMEM);
7063 }
7064
7065 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7066 return (error);
7067
7068 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7069 hadattrdir = (adnp != NULL);
7070 if (prefetch) {
7071 microuptime(&now);
7072 /* use the special state ID because we don't have a real one to send */
7073 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
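/* (an all-zeros stateid is the special "anonymous" stateid, which servers accept for READ without a prior open) */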
7074 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7075 }
7076 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7077 nfsm_chain_null(&nmreq);
7078 nfsm_chain_null(&nmrep);
7079
7080 if (hadattrdir) {
7081 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx))))
7082 goto nfsmout;
7083 /* nfs_getattr() will check changed and purge caches */
7084 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7085 nfsmout_if(error);
7086 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7087 switch (error) {
7088 case ENOENT:
7089 /* negative cache entry */
7090 goto nfsmout;
7091 case 0:
7092 /* cache miss */
7093 /* try dir buf cache lookup */
7094 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7095 if (!error && anp) {
7096 /* dir buf cache hit */
7097 *anpp = anp;
7098 error = -1;
7099 }
7100 if (error != -1) /* cache miss */
7101 break;
7102 /* FALLTHROUGH */
7103 case -1:
7104 /* cache hit, not really an error */
7105 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7106 if (!anp && avp)
7107 *anpp = anp = VTONFS(avp);
7108
7109 nfs_node_clear_busy(adnp);
7110 adbusyerror = ENOENT;
7111
7112 /* check for directory access */
7113 naa.a_desc = &vnop_access_desc;
7114 naa.a_vp = NFSTOV(adnp);
7115 naa.a_action = KAUTH_VNODE_SEARCH;
7116 naa.a_context = ctx;
7117
7118 /* compute actual success/failure based on accessibility */
7119 error = nfs_vnop_access(&naa);
7120 /* FALLTHROUGH */
7121 default:
7122 /* we either found it, or hit an error */
7123 if (!error && guarded) {
7124 /* found cached entry but told not to use it */
7125 error = EEXIST;
7126 vnode_put(NFSTOV(anp));
7127 *anpp = anp = NULL;
7128 }
7129 /* we're done if error or we don't need to open */
7130 if (error || !open)
7131 goto nfsmout;
7132 /* no error and we need to open... */
7133 }
7134 }
7135
7136 if (open) {
7137 restart:
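/* we restart from here if the open state had to be reestablished (e.g. after a reopen or server recovery) */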
7138 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7139 if (error) {
7140 nfs_open_owner_rele(noop);
7141 noop = NULL;
7142 goto nfsmout;
7143 }
7144 inuse = 1;
7145
7146 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7147 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7148 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7149 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7150 error = EIO;
7151 }
7152 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7153 nfs_mount_state_in_use_end(nmp, 0);
7154 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7155 nfs_open_file_destroy(newnofp);
7156 newnofp = NULL;
7157 if (!error)
7158 goto restart;
7159 }
7160 if (!error)
7161 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7162 if (error) {
7163 if (newnofp)
7164 nfs_open_file_destroy(newnofp);
7165 newnofp = NULL;
7166 goto nfsmout;
7167 }
7168 if (anp) {
7169 /*
7170 * We already have the node. So we just need to open
7171 * it - which we may be able to do with a delegation.
7172 */
7173 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7174 if (!error) {
7175 /* open succeeded, so our open file is no longer temporary */
7176 nofp = newnofp;
7177 nofpbusyerror = 0;
7178 newnofp = NULL;
7179 if (nofpp)
7180 *nofpp = nofp;
7181 }
7182 goto nfsmout;
7183 }
7184 }
7185
7186 /*
7187 * We either don't have the attrdir or we didn't find the attribute
7188 * in the name cache, so we need to talk to the server.
7189 *
7190 * If we don't have the attrdir, we'll need to ask the server for that too.
7191 * If the caller is requesting that the attribute be created, we need to
7192 * make sure the attrdir is created.
7193 * The caller may also request that the first block of an existing attribute
7194 * be retrieved at the same time.
7195 */
7196
7197 if (open) {
7198 /* need to mark the open owner busy during the RPC */
7199 if ((error = nfs_open_owner_set_busy(noop, thd)))
7200 goto nfsmout;
7201 noopbusy = 1;
7202 }
7203
7204 /*
7205 * We'd like to get updated post-open/lookup attributes for the
7206 * directory and we may also want to prefetch some data via READ.
7207 * We'd like the READ results to be last so that we can leave the
7208 * data in the mbufs until the end.
7209 *
7210 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7211 */
7212 numops = 5;
7213 if (!hadattrdir)
7214 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7215 if (prefetch)
7216 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7217 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7218 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7219 if (hadattrdir) {
7220 numops--;
7221 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7222 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7223 } else {
7224 numops--;
7225 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7226 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7227 numops--;
7228 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7229 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7230 numops--;
7231 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7232 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7233 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7234 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7235 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7236 }
7237 if (open) {
7238 numops--;
7239 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7240 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7241 nfsm_chain_add_32(error, &nmreq, accessMode);
7242 nfsm_chain_add_32(error, &nmreq, denyMode);
7243 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7244 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7245 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7246 nfsm_chain_add_32(error, &nmreq, create);
7247 if (create) {
7248 nfsm_chain_add_32(error, &nmreq, guarded);
7249 VATTR_INIT(&vattr);
7250 if (truncate)
7251 VATTR_SET(&vattr, va_data_size, 0);
7252 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7253 }
7254 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7255 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7256 } else {
7257 numops--;
7258 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7259 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7260 }
7261 numops--;
7262 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7263 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7264 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7265 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7266 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7267 if (prefetch) {
7268 numops--;
7269 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7270 }
7271 if (hadattrdir) {
7272 numops--;
7273 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7274 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7275 } else {
7276 numops--;
7277 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7278 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7279 numops--;
7280 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7281 nfsm_chain_add_32(error, &nmreq, 0);
7282 }
7283 numops--;
7284 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7285 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7286 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7287 if (prefetch) {
7288 numops--;
7289 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7290 numops--;
7291 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7292 VATTR_INIT(&vattr);
7293 VATTR_SET(&vattr, va_data_size, 0);
7294 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7295 numops--;
7296 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7297 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7298 nfsm_chain_add_64(error, &nmreq, 0);
7299 nfsm_chain_add_32(error, &nmreq, rlen);
7300 }
7301 nfsm_chain_build_done(error, &nmreq);
7302 nfsm_assert(error, (numops == 0), EPROTO);
7303 nfsmout_if(error);
7304 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7305 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7306 if (!error)
7307 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7308
7309 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp))))
7310 error = adlockerror;
7311 savedxid = xid;
7312 nfsm_chain_skip_tag(error, &nmrep);
7313 nfsm_chain_get_32(error, &nmrep, numops);
7314 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7315 if (!hadattrdir) {
7316 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7317 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7318 nfsmout_if(error);
7319 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7320 nfsmout_if(error);
7321 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7322 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7323 /* (re)allocate attrdir fh buffer */
7324 if (np->n_attrdirfh)
7325 FREE(np->n_attrdirfh, M_TEMP);
7326 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
7327 }
7328 if (np->n_attrdirfh) {
7329 /* remember the attrdir fh in the node */
7330 *np->n_attrdirfh = fh.fh_len;
7331 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
7332 /* create busied node for attrdir */
7333 struct componentname cn;
7334 bzero(&cn, sizeof(cn));
7335 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7336 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7337 cn.cn_nameiop = LOOKUP;
7338 // XXX can't set parent correctly (to np) yet
7339 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7340 if (!error) {
7341 adlockerror = 0;
7342 /* set the node busy */
7343 SET(adnp->n_flag, NBUSY);
7344 adbusyerror = 0;
7345 }
7346 /* if no adnp, oh well... */
7347 error = 0;
7348 }
7349 }
7350 NVATTR_CLEANUP(&nvattr);
7351 fh.fh_len = 0;
7352 }
7353 if (open) {
7354 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7355 nfs_owner_seqid_increment(noop, NULL, error);
7356 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7357 nfsm_chain_check_change_info(error, &nmrep, adnp);
7358 nfsm_chain_get_32(error, &nmrep, rflags);
7359 bmlen = NFS_ATTR_BITMAP_LEN;
7360 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7361 nfsm_chain_get_32(error, &nmrep, delegation);
7362 if (!error)
7363 switch (delegation) {
7364 case NFS_OPEN_DELEGATE_NONE:
7365 break;
7366 case NFS_OPEN_DELEGATE_READ:
7367 case NFS_OPEN_DELEGATE_WRITE:
7368 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7369 nfsm_chain_get_32(error, &nmrep, recall);
7370 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (XXX currently unused)
7371 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7372 /* if we have any trouble accepting the ACE, just invalidate it */
7373 ace_type = ace_flags = ace_mask = len = 0;
7374 nfsm_chain_get_32(error, &nmrep, ace_type);
7375 nfsm_chain_get_32(error, &nmrep, ace_flags);
7376 nfsm_chain_get_32(error, &nmrep, ace_mask);
7377 nfsm_chain_get_32(error, &nmrep, len);
7378 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7379 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7380 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7381 if (!error && (len >= slen)) {
7382 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
7383 if (s)
7384 slen = len+1;
7385 else
7386 ace.ace_flags = 0;
7387 }
7388 if (s)
7389 nfsm_chain_get_opaque(error, &nmrep, len, s);
7390 else
7391 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7392 if (!error && s) {
7393 s[len] = '\0';
7394 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
7395 ace.ace_flags = 0;
7396 }
7397 if (error || !s)
7398 ace.ace_flags = 0;
7399 if (s && (s != sbuf))
7400 FREE(s, M_TEMP);
7401 break;
7402 default:
7403 error = EBADRPC;
7404 break;
7405 }
7406 /* At this point if we have no error, the object was created/opened. */
7407 open_error = error;
7408 } else {
7409 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7410 }
7411 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7412 nfsmout_if(error);
7413 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7414 nfsmout_if(error);
7415 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7416 error = EIO;
7417 goto nfsmout;
7418 }
7419 if (prefetch)
7420 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7421 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7422 if (!hadattrdir)
7423 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7424 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7425 nfsmout_if(error);
7426 xid = savedxid;
7427 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
7428 nfsmout_if(error);
7429
7430 if (open) {
7431 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
7432 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7433 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
7434 if (adnp) {
7435 nfs_node_unlock(adnp);
7436 adlockerror = ENOENT;
7437 }
7438 NVATTR_CLEANUP(&nvattr);
7439 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
7440 nfsmout_if(error);
7441 savedxid = xid;
7442 if ((adlockerror = nfs_node_lock(adnp)))
7443 error = adlockerror;
7444 }
7445 }
7446
7447 nfsmout:
7448 if (open && adnp && !adlockerror) {
7449 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
7450 adnp->n_flag &= ~NNEGNCENTRIES;
7451 cache_purge_negatives(NFSTOV(adnp));
7452 }
7453 adnp->n_flag |= NMODIFIED;
7454 nfs_node_unlock(adnp);
7455 adlockerror = ENOENT;
7456 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7457 }
7458 if (adnp && !adlockerror && (error == ENOENT) &&
7459 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
7460 /* add a negative entry in the name cache */
7461 cache_enter(NFSTOV(adnp), NULL, cnp);
7462 adnp->n_flag |= NNEGNCENTRIES;
7463 }
7464 if (adnp && !adlockerror) {
7465 nfs_node_unlock(adnp);
7466 adlockerror = ENOENT;
7467 }
7468 if (!error && !anp && fh.fh_len) {
7469 /* create the vnode with the filehandle and attributes */
7470 xid = savedxid;
7471 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
7472 if (!error) {
7473 *anpp = anp;
7474 nfs_node_unlock(anp);
7475 }
7476 if (!error && open) {
7477 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
7478 /* After we have a node, add our open file struct to the node */
7479 nofp = newnofp;
7480 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
7481 if (error) {
7482 /* This shouldn't happen, because we passed in a new nofp to use. */
7483 printf("nfs_open_file_find_internal failed! %d\n", error);
7484 nofp = NULL;
7485 } else if (nofp != newnofp) {
7486 /*
7487 * Hmm... an open file struct already exists.
7488 * Mark the existing one busy and merge our open into it.
7489 * Then destroy the one we created.
7490 * Note: there's no chance of an open conflict because the
7491 * open has already been granted.
7492 */
7493 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
7494 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
7495 nofp->nof_stateid = newnofp->nof_stateid;
7496 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
7497 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7498 nfs_open_file_clear_busy(newnofp);
7499 nfs_open_file_destroy(newnofp);
7500 newnofp = NULL;
7501 }
7502 if (!error) {
7503 newnofp = NULL;
7504 nofpbusyerror = 0;
7505 /* mark the node as holding a create-initiated open */
7506 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
7507 nofp->nof_creator = current_thread();
7508 if (nofpp)
7509 *nofpp = nofp;
7510 }
7511 }
7512 }
7513 NVATTR_CLEANUP(&nvattr);
7514 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
7515 if (!error && anp && !recall) {
7516 /* stuff the delegation state in the node */
7517 lck_mtx_lock(&anp->n_openlock);
7518 anp->n_openflags &= ~N_DELEG_MASK;
7519 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7520 anp->n_dstateid = dstateid;
7521 anp->n_dace = ace;
7522 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7523 lck_mtx_lock(&nmp->nm_lock);
7524 if (anp->n_dlink.tqe_next == NFSNOLIST)
7525 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7526 lck_mtx_unlock(&nmp->nm_lock);
7527 }
7528 lck_mtx_unlock(&anp->n_openlock);
7529 } else {
7530 /* give the delegation back */
7531 if (anp) {
7532 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
7533 /* update delegation state and return it */
7534 lck_mtx_lock(&anp->n_openlock);
7535 anp->n_openflags &= ~N_DELEG_MASK;
7536 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7537 anp->n_dstateid = dstateid;
7538 anp->n_dace = ace;
7539 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7540 lck_mtx_lock(&nmp->nm_lock);
7541 if (anp->n_dlink.tqe_next == NFSNOLIST)
7542 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7543 lck_mtx_unlock(&nmp->nm_lock);
7544 }
7545 lck_mtx_unlock(&anp->n_openlock);
7546 /* don't need to send a separate delegreturn for fh */
7547 fh.fh_len = 0;
7548 }
7549 /* return anp's current delegation */
7550 nfs4_delegation_return(anp, 0, thd, cred);
7551 }
7552 if (fh.fh_len) /* return fh's delegation if it wasn't for anp */
7553 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
7554 }
7555 }
7556 if (open) {
7557 if (newnofp) {
7558 /* need to cleanup our temporary nofp */
7559 nfs_open_file_clear_busy(newnofp);
7560 nfs_open_file_destroy(newnofp);
7561 newnofp = NULL;
7562 } else if (nofp && !nofpbusyerror) {
7563 nfs_open_file_clear_busy(nofp);
7564 nofpbusyerror = ENOENT;
7565 }
7566 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
7567 inuse = 0;
7568 nofp = newnofp = NULL;
7569 rflags = delegation = recall = eof = rlen = retlen = 0;
7570 ace.ace_flags = 0;
7571 s = sbuf;
7572 slen = sizeof(sbuf);
7573 nfsm_chain_cleanup(&nmreq);
7574 nfsm_chain_cleanup(&nmrep);
7575 if (anp) {
7576 vnode_put(NFSTOV(anp));
7577 *anpp = anp = NULL;
7578 }
7579 hadattrdir = (adnp != NULL);
7580 if (noopbusy) {
7581 nfs_open_owner_clear_busy(noop);
7582 noopbusy = 0;
7583 }
7584 goto restart;
7585 }
7586 if (noop) {
7587 if (noopbusy) {
7588 nfs_open_owner_clear_busy(noop);
7589 noopbusy = 0;
7590 }
7591 nfs_open_owner_rele(noop);
7592 }
7593 }
7594 if (!error && prefetch && nmrep.nmc_mhead) {
7595 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7596 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
7597 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
7598 nfsm_chain_get_32(error, &nmrep, eof);
7599 nfsm_chain_get_32(error, &nmrep, retlen);
7600 if (!error && anp) {
7601 /*
7602 * There can be one problem with doing the prefetch.
7603 * Because we don't have the node before we start the RPC, we
7604 * can't have the buffer busy while the READ is performed.
7605 * So there is a chance that other I/O occurred on the same
7606 * range of data while we were performing this RPC. If that
7607 * happens, then it's possible the data we have in the READ
7608 * response is no longer up to date.
7609 * Once we have the node and the buffer, we need to make sure
7610 * that there's no chance we could be putting stale data in
7611 * the buffer.
7612 * So, we check if the range read is dirty or if any I/O may
7613 * have occurred on it while we were performing our RPC.
7614 */
7615 struct nfsbuf *bp = NULL;
7616 int lastpg;
7617 uint32_t pagemask;
7618
7619 retlen = MIN(retlen, rlen);
7620
7621 /* check if node needs size update or invalidation */
7622 if (ISSET(anp->n_flag, NUPDATESIZE))
7623 nfs_data_update_size(anp, 0);
7624 if (!(error = nfs_node_lock(anp))) {
7625 if (anp->n_flag & NNEEDINVALIDATE) {
7626 anp->n_flag &= ~NNEEDINVALIDATE;
7627 nfs_node_unlock(anp);
7628 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
7629 if (!error) /* let's play it safe and just drop the data */
7630 error = EIO;
7631 } else {
7632 nfs_node_unlock(anp);
7633 }
7634 }
7635
7636 /* calculate page mask for the range of data read */
7637 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
7638 pagemask = ((1 << (lastpg + 1)) - 1);
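/* e.g. a read covering exactly three pages gives lastpg == 2 and pagemask == 0x7 */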
7639
7640 if (!error)
7641 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
7642 /* don't save the data if dirty or potential I/O conflict */
7643 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
7644 timevalcmp(&anp->n_lastio, &now, <)) {
7645 OSAddAtomic64(1, &nfsstats.read_bios);
7646 CLR(bp->nb_flags, (NB_DONE|NB_ASYNC));
7647 SET(bp->nb_flags, NB_READ);
7648 NFS_BUF_MAP(bp);
7649 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
7650 if (error) {
7651 bp->nb_error = error;
7652 SET(bp->nb_flags, NB_ERROR);
7653 } else {
7654 bp->nb_offio = 0;
7655 bp->nb_endio = rlen;
7656 if ((retlen > 0) && (bp->nb_endio < (int)retlen))
7657 bp->nb_endio = retlen;
7658 if (eof || (retlen == 0)) {
7659 /* zero out the remaining data (up to EOF) */
7660 off_t rpcrem, eofrem, rem;
7661 rpcrem = (rlen - retlen);
7662 eofrem = anp->n_size - (NBOFF(bp) + retlen);
7663 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
7664 if (rem > 0)
7665 bzero(bp->nb_data + retlen, rem);
7666 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
7667 /* ugh... short read ... just invalidate for now... */
7668 SET(bp->nb_flags, NB_INVAL);
7669 }
7670 }
7671 nfs_buf_read_finish(bp);
7672 microuptime(&anp->n_lastio);
7673 }
7674 if (bp)
7675 nfs_buf_release(bp, 1);
7676 }
7677 error = 0; /* ignore any transient error in processing the prefetch */
7678 }
7679 if (adnp && !adbusyerror) {
7680 nfs_node_clear_busy(adnp);
7681 adbusyerror = ENOENT;
7682 }
7683 if (!busyerror) {
7684 nfs_node_clear_busy(np);
7685 busyerror = ENOENT;
7686 }
7687 if (adnp)
7688 vnode_put(NFSTOV(adnp));
7689 if (error && *anpp) {
7690 vnode_put(NFSTOV(*anpp));
7691 *anpp = NULL;
7692 }
7693 nfsm_chain_cleanup(&nmreq);
7694 nfsm_chain_cleanup(&nmrep);
7695 return (error);
7696 }
7697
7698 /*
7699 * Remove a named attribute.
7700 */
7701 int
7702 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
7703 {
7704 nfsnode_t adnp = NULL;
7705 struct nfsmount *nmp;
7706 struct componentname cn;
7707 struct vnop_remove_args vra;
7708 int error, putanp = 0;
7709
7710 nmp = NFSTONMP(np);
7711 if (nfs_mount_gone(nmp))
7712 return (ENXIO);
7713
7714 bzero(&cn, sizeof(cn));
7715 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7716 cn.cn_namelen = strlen(name);
7717 cn.cn_nameiop = DELETE;
7718 cn.cn_flags = 0;
7719
7720 if (!anp) {
7721 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7722 0, ctx, &anp, NULL);
7723 if ((!error && !anp) || (error == ENOATTR))
7724 error = ENOENT;
7725 if (error) {
7726 if (anp) {
7727 vnode_put(NFSTOV(anp));
7728 anp = NULL;
7729 }
7730 goto out;
7731 }
7732 putanp = 1;
7733 }
7734
7735 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7736 goto out;
7737 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
7738 nfs_node_clear_busy(np);
7739 if (!adnp) {
7740 error = ENOENT;
7741 goto out;
7742 }
7743
7744 vra.a_desc = &vnop_remove_desc;
7745 vra.a_dvp = NFSTOV(adnp);
7746 vra.a_vp = NFSTOV(anp);
7747 vra.a_cnp = &cn;
7748 vra.a_flags = 0;
7749 vra.a_context = ctx;
7750 error = nfs_vnop_remove(&vra);
7751 out:
7752 if (adnp)
7753 vnode_put(NFSTOV(adnp));
7754 if (putanp)
7755 vnode_put(NFSTOV(anp));
7756 return (error);
7757 }
7758
7759 int
7760 nfs4_vnop_getxattr(
7761 struct vnop_getxattr_args /* {
7762 struct vnodeop_desc *a_desc;
7763 vnode_t a_vp;
7764 const char * a_name;
7765 uio_t a_uio;
7766 size_t *a_size;
7767 int a_options;
7768 vfs_context_t a_context;
7769 } */ *ap)
7770 {
7771 vfs_context_t ctx = ap->a_context;
7772 struct nfsmount *nmp;
7773 struct nfs_vattr nvattr;
7774 struct componentname cn;
7775 nfsnode_t anp;
7776 int error = 0, isrsrcfork;
7777
7778 nmp = VTONMP(ap->a_vp);
7779 if (nfs_mount_gone(nmp))
7780 return (ENXIO);
7781
7782 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7783 return (ENOTSUP);
7784 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
7785 if (error)
7786 return (error);
7787 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7788 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7789 return (ENOATTR);
7790
7791 bzero(&cn, sizeof(cn));
7792 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
7793 cn.cn_namelen = strlen(ap->a_name);
7794 cn.cn_nameiop = LOOKUP;
7795 cn.cn_flags = MAKEENTRY;
7796
7797 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7798 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7799
7800 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7801 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
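/* the xattr API reports a missing attribute as ENOATTR rather than ENOENT */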
7802 if ((!error && !anp) || (error == ENOENT))
7803 error = ENOATTR;
7804 if (!error) {
7805 if (ap->a_uio)
7806 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
7807 else
7808 *ap->a_size = anp->n_size;
7809 }
7810 if (anp)
7811 vnode_put(NFSTOV(anp));
7812 return (error);
7813 }
7814
7815 int
7816 nfs4_vnop_setxattr(
7817 struct vnop_setxattr_args /* {
7818 struct vnodeop_desc *a_desc;
7819 vnode_t a_vp;
7820 const char * a_name;
7821 uio_t a_uio;
7822 int a_options;
7823 vfs_context_t a_context;
7824 } */ *ap)
7825 {
7826 vfs_context_t ctx = ap->a_context;
7827 int options = ap->a_options;
7828 uio_t uio = ap->a_uio;
7829 const char *name = ap->a_name;
7830 struct nfsmount *nmp;
7831 struct componentname cn;
7832 nfsnode_t anp = NULL;
7833 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
7834 #define FINDERINFOSIZE 32
7835 uint8_t finfo[FINDERINFOSIZE];
7836 uint32_t *finfop;
7837 struct nfs_open_file *nofp = NULL;
7838 char uio_buf [ UIO_SIZEOF(1) ];
7839 uio_t auio;
7840 struct vnop_write_args vwa;
7841
7842 nmp = VTONMP(ap->a_vp);
7843 if (nfs_mount_gone(nmp))
7844 return (ENXIO);
7845
7846 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7847 return (ENOTSUP);
7848
7849 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE))
7850 return (EINVAL);
7851
7852 /* XXX limitation: we can't back the uio up after a short write, so only single-iovec uios are supported */
7853 if (uio_iovcnt(uio) > 1) {
7854 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7855 return (EINVAL);
7856 }
7857
7858 bzero(&cn, sizeof(cn));
7859 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7860 cn.cn_namelen = strlen(name);
7861 cn.cn_nameiop = CREATE;
7862 cn.cn_flags = MAKEENTRY;
7863
7864 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
7865 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7866 if (!isrsrcfork)
7867 uio_setoffset(uio, 0);
7868 if (isfinderinfo) {
7869 if (uio_resid(uio) != sizeof(finfo))
7870 return (ERANGE);
7871 error = uiomove((char*)&finfo, sizeof(finfo), uio);
7872 if (error)
7873 return (error);
7874 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7875 empty = 1;
7876 for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++)
7877 if (finfop[i]) {
7878 empty = 0;
7879 break;
7880 }
7881 if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) {
7882 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7883 if (error == ENOENT)
7884 error = 0;
7885 return (error);
7886 }
7887 /* first, let's see if we get a create/replace error */
7888 }
7889
7890 /*
7891 * create/open the xattr
7892 *
7893 * We need to make sure not to create it if XATTR_REPLACE.
7894 * For all xattrs except the resource fork, we also want to
7895 * truncate the xattr to remove any current data. We'll do
7896 * that by setting the size to 0 on create/open.
7897 */
7898 flags = 0;
7899 if (!(options & XATTR_REPLACE))
7900 flags |= NFS_GET_NAMED_ATTR_CREATE;
7901 if (options & XATTR_CREATE)
7902 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
7903 if (!isrsrcfork)
7904 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
7905
7906 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
7907 flags, ctx, &anp, &nofp);
7908 if (!error && !anp)
7909 error = ENOATTR;
7910 if (error)
7911 goto out;
7912 /* grab the open state from the get/create/open */
7913 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
7914 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
7915 nofp->nof_creator = NULL;
7916 nfs_open_file_clear_busy(nofp);
7917 }
7918
7919 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7920 if (isfinderinfo && empty)
7921 goto doclose;
7922
7923 /*
7924 * Write the data out and flush.
7925 *
7926 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7927 */
7928 vwa.a_desc = &vnop_write_desc;
7929 vwa.a_vp = NFSTOV(anp);
7930 vwa.a_uio = NULL;
7931 vwa.a_ioflag = 0;
7932 vwa.a_context = ctx;
7933 if (isfinderinfo) {
7934 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
7935 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
7936 vwa.a_uio = auio;
7937 } else if (uio_resid(uio) > 0) {
7938 vwa.a_uio = uio;
7939 }
7940 if (vwa.a_uio) {
7941 error = nfs_vnop_write(&vwa);
7942 if (!error)
7943 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
7944 }
7945 doclose:
7946 /* Close the xattr. */
7947 if (nofp) {
7948 int busyerror = nfs_open_file_set_busy(nofp, NULL);
7949 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
7950 if (!busyerror)
7951 nfs_open_file_clear_busy(nofp);
7952 }
7953 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
7954 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7955 if (error == ENOENT)
7956 error = 0;
7957 }
7958 if (!error)
7959 error = closeerror;
7960 out:
7961 if (anp)
7962 vnode_put(NFSTOV(anp));
7963 if (error == ENOENT)
7964 error = ENOATTR;
7965 return (error);
7966 }
7967
7968 int
7969 nfs4_vnop_removexattr(
7970 struct vnop_removexattr_args /* {
7971 struct vnodeop_desc *a_desc;
7972 vnode_t a_vp;
7973 const char * a_name;
7974 int a_options;
7975 vfs_context_t a_context;
7976 } */ *ap)
7977 {
7978 struct nfsmount *nmp = VTONMP(ap->a_vp);
7979 int error;
7980
7981 if (nfs_mount_gone(nmp))
7982 return (ENXIO);
7983 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7984 return (ENOTSUP);
7985
7986 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
7987 if (error == ENOENT)
7988 error = ENOATTR;
7989 return (error);
7990 }
7991
7992 int
7993 nfs4_vnop_listxattr(
7994 struct vnop_listxattr_args /* {
7995 struct vnodeop_desc *a_desc;
7996 vnode_t a_vp;
7997 uio_t a_uio;
7998 size_t *a_size;
7999 int a_options;
8000 vfs_context_t a_context;
8001 } */ *ap)
8002 {
8003 vfs_context_t ctx = ap->a_context;
8004 nfsnode_t np = VTONFS(ap->a_vp);
8005 uio_t uio = ap->a_uio;
8006 nfsnode_t adnp = NULL;
8007 struct nfsmount *nmp;
8008 int error, done, i;
8009 struct nfs_vattr nvattr;
8010 uint64_t cookie, nextcookie, lbn = 0;
8011 struct nfsbuf *bp = NULL;
8012 struct nfs_dir_buf_header *ndbhp;
8013 struct direntry *dp;
8014
8015 nmp = VTONMP(ap->a_vp);
8016 if (nfs_mount_gone(nmp))
8017 return (ENXIO);
8018
8019 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8020 return (ENOTSUP);
8021
8022 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8023 if (error)
8024 return (error);
8025 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8026 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8027 return (0);
8028
8029 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
8030 return (error);
8031 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8032 nfs_node_clear_busy(np);
8033 if (!adnp)
8034 goto out;
8035
8036 if ((error = nfs_node_lock(adnp)))
8037 goto out;
8038
8039 if (adnp->n_flag & NNEEDINVALIDATE) {
8040 adnp->n_flag &= ~NNEEDINVALIDATE;
8041 nfs_invaldir(adnp);
8042 nfs_node_unlock(adnp);
8043 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8044 if (!error)
8045 error = nfs_node_lock(adnp);
8046 if (error)
8047 goto out;
8048 }
8049
8050 /*
8051 * check for need to invalidate when (re)starting at beginning
8052 */
8053 if (adnp->n_flag & NMODIFIED) {
8054 nfs_invaldir(adnp);
8055 nfs_node_unlock(adnp);
8056 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1)))
8057 goto out;
8058 } else {
8059 nfs_node_unlock(adnp);
8060 }
8061 /* nfs_getattr() will check changed and purge caches */
8062 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED)))
8063 goto out;
8064
8065 if (uio && (uio_resid(uio) == 0))
8066 goto out;
8067
8068 done = 0;
8069 nextcookie = lbn = 0;
8070
8071 while (!error && !done) {
8072 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8073 cookie = nextcookie;
8074 getbuffer:
8075 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8076 if (error)
8077 goto out;
8078 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8079 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8080 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8081 ndbhp->ndbh_flags = 0;
8082 ndbhp->ndbh_count = 0;
8083 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8084 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8085 }
8086 error = nfs_buf_readdir(bp, ctx);
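/* NFSERR_DIRBUFDROPPED: the buffer was dropped while reading; grab it again and retry */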
8087 if (error == NFSERR_DIRBUFDROPPED)
8088 goto getbuffer;
8089 if (error)
8090 nfs_buf_release(bp, 1);
8091 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8092 if (!nfs_node_lock(adnp)) {
8093 nfs_invaldir(adnp);
8094 nfs_node_unlock(adnp);
8095 }
8096 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8097 if (error == NFSERR_BAD_COOKIE)
8098 error = ENOENT;
8099 }
8100 if (error)
8101 goto out;
8102 }
8103
8104 /* go through all the entries copying/counting */
8105 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
8106 for (i=0; i < ndbhp->ndbh_count; i++) {
8107 if (!xattr_protected(dp->d_name)) {
8108 if (uio == NULL) {
8109 *ap->a_size += dp->d_namlen + 1;
8110 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8111 error = ERANGE;
8112 } else {
8113 error = uiomove(dp->d_name, dp->d_namlen+1, uio);
8114 if (error && (error != EFAULT))
8115 error = ERANGE;
8116 }
8117 }
8118 nextcookie = dp->d_seekoff;
8119 dp = NFS_DIRENTRY_NEXT(dp);
8120 }
8121
8122 if (i == ndbhp->ndbh_count) {
8123 /* hit end of buffer, move to next buffer */
8124 lbn = nextcookie;
8125 /* if we also hit EOF, we're done */
8126 if (ISSET(ndbhp->ndbh_flags, NDB_EOF))
8127 done = 1;
8128 }
8129 if (!error && !done && (nextcookie == cookie)) {
8130 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8131 error = EIO;
8132 }
8133 nfs_buf_release(bp, 1);
8134 }
8135 out:
8136 if (adnp)
8137 vnode_put(NFSTOV(adnp));
8138 return (error);
8139 }
8140
8141 #if NAMEDSTREAMS
8142 int
8143 nfs4_vnop_getnamedstream(
8144 struct vnop_getnamedstream_args /* {
8145 struct vnodeop_desc *a_desc;
8146 vnode_t a_vp;
8147 vnode_t *a_svpp;
8148 const char *a_name;
8149 enum nsoperation a_operation;
8150 int a_flags;
8151 vfs_context_t a_context;
8152 } */ *ap)
8153 {
8154 vfs_context_t ctx = ap->a_context;
8155 struct nfsmount *nmp;
8156 struct nfs_vattr nvattr;
8157 struct componentname cn;
8158 nfsnode_t anp;
8159 int error = 0;
8160
8161 nmp = VTONMP(ap->a_vp);
8162 if (nfs_mount_gone(nmp))
8163 return (ENXIO);
8164
8165 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8166 return (ENOTSUP);
8167 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8168 if (error)
8169 return (error);
8170 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8171 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8172 return (ENOATTR);
8173
8174 bzero(&cn, sizeof(cn));
8175 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8176 cn.cn_namelen = strlen(ap->a_name);
8177 cn.cn_nameiop = LOOKUP;
8178 cn.cn_flags = MAKEENTRY;
8179
8180 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8181 0, ctx, &anp, NULL);
8182 if ((!error && !anp) || (error == ENOENT))
8183 error = ENOATTR;
8184 if (!error && anp)
8185 *ap->a_svpp = NFSTOV(anp);
8186 else if (anp)
8187 vnode_put(NFSTOV(anp));
8188 return (error);
8189 }
8190
8191 int
8192 nfs4_vnop_makenamedstream(
8193 struct vnop_makenamedstream_args /* {
8194 struct vnodeop_desc *a_desc;
8195 vnode_t *a_svpp;
8196 vnode_t a_vp;
8197 const char *a_name;
8198 int a_flags;
8199 vfs_context_t a_context;
8200 } */ *ap)
8201 {
8202 vfs_context_t ctx = ap->a_context;
8203 struct nfsmount *nmp;
8204 struct componentname cn;
8205 nfsnode_t anp;
8206 int error = 0;
8207
8208 nmp = VTONMP(ap->a_vp);
8209 if (nfs_mount_gone(nmp))
8210 return (ENXIO);
8211
8212 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8213 return (ENOTSUP);
8214
8215 bzero(&cn, sizeof(cn));
8216 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8217 cn.cn_namelen = strlen(ap->a_name);
8218 cn.cn_nameiop = CREATE;
8219 cn.cn_flags = MAKEENTRY;
8220
8221 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8222 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8223 if ((!error && !anp) || (error == ENOENT))
8224 error = ENOATTR;
8225 if (!error && anp)
8226 *ap->a_svpp = NFSTOV(anp);
8227 else if (anp)
8228 vnode_put(NFSTOV(anp));
8229 return (error);
8230 }
8231
8232 int
8233 nfs4_vnop_removenamedstream(
8234 struct vnop_removenamedstream_args /* {
8235 struct vnodeop_desc *a_desc;
8236 vnode_t a_vp;
8237 vnode_t a_svp;
8238 const char *a_name;
8239 int a_flags;
8240 vfs_context_t a_context;
8241 } */ *ap)
8242 {
8243 struct nfsmount *nmp = VTONMP(ap->a_vp);
8244 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8245 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8246
8247 if (nfs_mount_gone(nmp))
8248 return (ENXIO);
8249
8250 /*
8251 * Given that a_svp is a named stream, checking for
8252 * named attribute support is kinda pointless.
8253 */
8254 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8255 return (ENOTSUP);
8256
8257 return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context));
8258 }
8259
8260 #endif