/*
 * Copyright (c) 2006-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

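/*
 * Note: each RPC below is built as an NFSv4 COMPOUND, typically
 * PUTFH (establish the current filehandle) + the operation itself +
 * GETATTR (piggyback fresh attributes to keep the attribute cache warm).
 * The nfsm_chain_* builders accumulate any failure in "error" and become
 * no-ops once it is set, so request construction can run straight through
 * with a single error check at the end.
 */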
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (0);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		&si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE);
		else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD))
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		if (dvp != NULLVP)
			vnode_put(dvp);
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE))
		access_result |= NFS_ACCESS_DELETE;
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

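	/*
	 * Cache the ACCESS result per user. For Kerberized mounts the
	 * cache slot is keyed by a uid derived from the credential's
	 * audit session (nfs_cred_getasid2uid) rather than the effective
	 * uid, since those credentials are tracked per-session.
	 */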
	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

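/*
 * Fetch attributes for the given filehandle. Vnode monitor (NGA_MONITOR)
 * requests are sent with R_RECOVER and NGA_SOFT requests with R_SOFT,
 * keeping them "soft" so an unresponsive server produces a timeout error
 * instead of an indefinite hang. NGA_ACL additionally requests the ACL
 * when the filesystem advertises ACL support.
 */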
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return (0);
	}

	if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;

	if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls)
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx),
		NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

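/*
 * Read a symlink's target. The returned length is clamped so the string
 * plus its terminating NUL always fits the caller's buffer; if the cached
 * node size says the link is shorter than the buffer, that size is used.
 */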
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp))
			len = np->n_size;
		else
			len = *buflenp - 1;
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error)
		*buflenp = len;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

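/*
 * READ is split into an asynchronous send and a finish: _async builds and
 * queues the COMPOUND (using the open/lock stateid for this thread/cred),
 * and _async_finish parses the reply once the request completes. WRITE
 * below follows the same pattern.
 */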
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
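	/* A zero-length read without the EOF flag still implies EOF. */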
	if (eofp) {
		if (!eof && !retlen)
			eof = 1;
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

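/*
 * For mounts flagged MNT_ASYNC, stable (FILESYNC/DATASYNC) writes are
 * demoted to UNSTABLE up front: the caller asked for asynchronous
 * semantics, so there's no point paying for a synchronous commit.
 */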
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		iomode = NFS_WRITE_UNSTABLE;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error)
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		error = ENXIO;
	if (!error && (lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0)
		error = NFSERR_IO;
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp)
		*wverfp = wverf;
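	/*
	 * Track the server's write verifier. A changed verifier means the
	 * server lost its uncommitted write state (e.g. it rebooted), so
	 * callers comparing verifiers know to re-send unstable writes.
	 */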
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		committed = NFS_WRITE_FILESYNC;
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(dnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
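	/*
	 * NFSERR_GRACE means the server is still in its post-reboot grace
	 * period; back off briefly and retry the whole request.
	 */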
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
		goto restart;
	}

	return (remove_error);
}

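/*
 * Rename uses SAVEFH/RESTOREFH to address two directories in a single
 * COMPOUND: the source directory is saved, the target directory is made
 * current for RENAME, and both directories' attributes are then refreshed.
 */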
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(tdnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(fdnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return (error);
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp)))
		return (lockerror);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i=0; i < ndbhp->ndbh_count-1; i++)
			dp = NFS_DIRENTRY_NEXT(dp);
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh.fh_len = 0;
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen+1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen+1);
		if (dnp->n_parent)
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		else
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {

		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
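		/* cookies 1 and 2 are our synthesized "." and ".." entries; for those, start from 0 */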
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus)
			microuptime(&now);

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name)-1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen)
				nfsm_chain_adv(error, &nmrep,
					nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
				dp->d_fileno = nvattrp->nva_fileid;
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen+1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen+1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0)
				bzero(padstart, padlen);
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp)
		nfs_buf_release(bp, 0);
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
}

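/*
 * Asynchronous LOOKUP. A ".." lookup is sent as LOOKUPP (which moves to
 * the parent directory) instead of LOOKUP with a name, and its SECINFO
 * args reference the directory itself since there is no child name.
 */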
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's')))
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}


int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
		isdotdot = 1;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp)
		*xidp = xid;
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL)
			error = 0;
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count)
				nmp->nm_auth = sec.flavors[0];
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return (error);
}

int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
		return (0);
	nfsvers = nmp->nm_vers;

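	/* COMMIT's count is 32 bits; zero asks the server to commit through end of file. */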
	if (count > UINT32_MAX)
		count32 = 0;
	else
		count32 = count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
		current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf)
		nmp->nm_verf = newwverf;
	if (wverf != newwverf)
		error = NFSERR_STALEWRITEVERF;
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	if (!error)
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	if (!lockerror)
		nfs_node_unlock(np);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls)
		ngaflags |= NGA_ACL;
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error)
		return (error);

	vap->va_flags |= VA_64BITOBJIDS;

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
			VATTR_RETURN(vap, va_mode, 0777);
		else
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	     NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	     (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED))
			flags |= SF_ARCHIVED;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN))
			flags |= UF_HIDDEN;
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
		VATTR_RETURN(vap, va_type, nva.nva_type);
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
		VATTR_RETURN(vap, va_filerev, nva.nva_change);

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return (error);
}

int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
	uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
	nfs_stateid stateid;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags)
			return (EINVAL); /* return EINVAL if other attributes also set */
		else
			return (ENOTSUP); /* return ENOTSUP for chflags(2) */
	}

	/* don't bother requesting some changes if they don't look like they are changing */
	if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid))
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid))
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid))
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid))
		VATTR_CLEAR_ACTIVE(vap, va_guuid);

tryagain:
	/* do nothing if no attributes will be sent */
	nfs_vattr_set_bitmap(nmp, bitmap, vap);
	if (!bitmap[0] && !bitmap[1])
		return (0);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
	 * need to invalidate any cached ACL. And if we had an ACL cached,
	 * we might as well also fetch the new value.
	 */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
	    NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
		if (NACLVALID(np))
			NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
		NACLINVALIDATE(np);
	}

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
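	/*
	 * SETATTR needs a real open/lock stateid only when changing the
	 * file size (truncation acts like a write); otherwise the special
	 * all-zeros stateid is used.
	 */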
1562 if (VATTR_IS_ACTIVE(vap, va_data_size))
1563 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1564 else
1565 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1566 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1567 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1568 numops--;
1569 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1570 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1571 nfsm_chain_build_done(error, &nmreq);
1572 nfsm_assert(error, (numops == 0), EPROTO);
1573 nfsmout_if(error);
1574 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1575
1576 if ((lockerror = nfs_node_lock(np)))
1577 error = lockerror;
1578 nfsm_chain_skip_tag(error, &nmrep);
1579 nfsm_chain_get_32(error, &nmrep, numops);
1580 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1581 nfsmout_if(error);
1582 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1583 nfsmout_if(error == EBADRPC);
1584 setattr_error = error;
1585 error = 0;
1586 bmlen = NFS_ATTR_BITMAP_LEN;
1587 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1588 if (!error) {
1589 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
1590 microuptime(&np->n_lastio);
1591 nfs_vattr_set_supported(setbitmap, vap);
1592 error = setattr_error;
1593 }
1594 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1595 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1596 if (error)
1597 NATTRINVALIDATE(np);
1598 /*
1599 * We just changed the attributes and we want to make sure that we
1600 * see the latest attributes. Get the next XID. If it's not the
1601 * next XID after the SETATTR XID, then it's possible that another
1602 * RPC was in flight at the same time and it might put stale attributes
1603 * in the cache. In that case, we invalidate the attributes and set
1604 * the attribute cache XID to guarantee that newer attributes will
1605 * get loaded next.
1606 */
1607 nextxid = 0;
1608 nfs_get_xid(&nextxid);
1609 if (nextxid != (xid + 1)) {
1610 np->n_xid = nextxid;
1611 NATTRINVALIDATE(np);
1612 }
1613 nfsmout:
1614 if (!lockerror)
1615 nfs_node_unlock(np);
1616 nfsm_chain_cleanup(&nmreq);
1617 nfsm_chain_cleanup(&nmrep);
1618 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1619 /*
1620 * Some servers may not like ACL/mode combos that get sent.
1621 * If it looks like that's what the server choked on, try setting
1622 * just the ACL and not the mode (unless it looks like everything
1623 * but mode was already successfully set).
1624 */
1625 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1626 ((bitmap[1] & (setbitmap[1]|NFS_FATTR_MODE)) != bitmap[1])) {
1627 VATTR_CLEAR_ACTIVE(vap, va_mode);
1628 error = 0;
1629 goto tryagain;
1630 }
1631 }
1632 return (error);
1633 }
1634
1635 /*
1636 * Wait for any pending recovery to complete.
1637 */
1638 int
1639 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1640 {
1641 struct timespec ts = { 1, 0 };
1642 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1643
1644 lck_mtx_lock(&nmp->nm_lock);
1645 while (nmp->nm_state & NFSSTA_RECOVER) {
1646 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1647 break;
1648 nfs_mount_sock_thread_wake(nmp);
1649 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1650 slpflag = 0;
1651 }
1652 lck_mtx_unlock(&nmp->nm_lock);
1653
1654 return (error);
1655 }
1656
1657 /*
1658 * We're about to use/manipulate NFS mount's open/lock state.
1659 * Wait for any pending state recovery to complete, then
1660 * mark the state as being in use (which will hold off
1661 * the recovery thread until we're done).
1662 */
1663 int
1664 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1665 {
1666 struct timespec ts = { 1, 0 };
1667 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1668
1669 if (nfs_mount_gone(nmp))
1670 return (ENXIO);
1671 lck_mtx_lock(&nmp->nm_lock);
1672 if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
1673 lck_mtx_unlock(&nmp->nm_lock);
1674 return (ENXIO);
1675 }
1676 while (nmp->nm_state & NFSSTA_RECOVER) {
1677 if ((error = nfs_sigintr(nmp, NULL, thd, 1)))
1678 break;
1679 nfs_mount_sock_thread_wake(nmp);
1680 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1681 slpflag = 0;
1682 }
1683 if (!error)
1684 nmp->nm_stateinuse++;
1685 lck_mtx_unlock(&nmp->nm_lock);
1686
1687 return (error);
1688 }
1689
1690 /*
1691 * We're done using/manipulating the NFS mount's open/lock
1692 * state. If the given error indicates that recovery should
1693 * be performed, we'll initiate recovery.
1694 */
1695 int
1696 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1697 {
1698 int restart = nfs_mount_state_error_should_restart(error);
1699
1700 if (nfs_mount_gone(nmp))
1701 return (restart);
1702 lck_mtx_lock(&nmp->nm_lock);
1703 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1704 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1705 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1706 nfs_need_recover(nmp, error);
1707 }
1708 if (nmp->nm_stateinuse > 0)
1709 nmp->nm_stateinuse--;
1710 else
1711 panic("NFS mount state in use count underrun");
1712 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
1713 wakeup(&nmp->nm_stateinuse);
1714 lck_mtx_unlock(&nmp->nm_lock);
1715 if (error == NFSERR_GRACE)
1716 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
1717
1718 return (restart);
1719 }
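/*
 * Illustrative sketch (not compiled): the intended caller pattern for
 * the two routines above, as used by the vnops later in this file.
 * Bracket each state-dependent operation with in_use_start/in_use_end
 * and redo the operation if in_use_end reports that recovery was
 * initiated.  "do_stateful_rpc" is a hypothetical stand-in for an
 * actual OPEN/CLOSE/LOCK/etc. operation.
 */
#if 0
restart:
	error = nfs_mount_state_in_use_start(nmp, thd);
	if (error)
		return (error);
	error = do_stateful_rpc(np, ctx);	/* hypothetical stateful RPC */
	if (nfs_mount_state_in_use_end(nmp, error))
		goto restart;	/* recovery was triggered - redo the operation */
#endif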
1720
1721 /*
1722 * Does the error mean we should restart/redo a state-related operation?
1723 */
1724 int
1725 nfs_mount_state_error_should_restart(int error)
1726 {
1727 switch (error) {
1728 case NFSERR_STALE_STATEID:
1729 case NFSERR_STALE_CLIENTID:
1730 case NFSERR_ADMIN_REVOKED:
1731 case NFSERR_EXPIRED:
1732 case NFSERR_OLD_STATEID:
1733 case NFSERR_BAD_STATEID:
1734 case NFSERR_GRACE:
1735 return (1);
1736 }
1737 return (0);
1738 }
1739
1740 /*
1741 * In some cases we may want to limit how many times we restart a
1742 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1743 * Base the limit on the lease (as long as it's not too short).
1744 */
1745 uint
1746 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1747 {
1748 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1749 }
1750
1751 /*
1752 * Does the error mean we probably lost a delegation?
1753 */
1754 int
1755 nfs_mount_state_error_delegation_lost(int error)
1756 {
1757 switch (error) {
1758 case NFSERR_STALE_STATEID:
1759 case NFSERR_ADMIN_REVOKED:
1760 case NFSERR_EXPIRED:
1761 case NFSERR_OLD_STATEID:
1762 case NFSERR_BAD_STATEID:
1763 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1764 return (1);
1765 }
1766 return (0);
1767 }
1768
1769
1770 /*
1771 * Mark an NFS node's open state as busy.
1772 */
1773 int
1774 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1775 {
1776 struct nfsmount *nmp;
1777 struct timespec ts = {2, 0};
1778 int error = 0, slpflag;
1779
1780 nmp = NFSTONMP(np);
1781 if (nfs_mount_gone(nmp))
1782 return (ENXIO);
1783 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1784
1785 lck_mtx_lock(&np->n_openlock);
1786 while (np->n_openflags & N_OPENBUSY) {
1787 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1788 break;
1789 np->n_openflags |= N_OPENWANT;
1790 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1791 slpflag = 0;
1792 }
1793 if (!error)
1794 np->n_openflags |= N_OPENBUSY;
1795 lck_mtx_unlock(&np->n_openlock);
1796
1797 return (error);
1798 }
1799
1800 /*
1801 * Clear an NFS node's open state busy flag and wake up
1802 * anyone wanting it.
1803 */
1804 void
1805 nfs_open_state_clear_busy(nfsnode_t np)
1806 {
1807 int wanted;
1808
1809 lck_mtx_lock(&np->n_openlock);
1810 if (!(np->n_openflags & N_OPENBUSY))
1811 panic("nfs_open_state_clear_busy");
1812 wanted = (np->n_openflags & N_OPENWANT);
1813 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1814 lck_mtx_unlock(&np->n_openlock);
1815 if (wanted)
1816 wakeup(&np->n_openflags);
1817 }
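/*
 * Illustrative sketch (not compiled): how the busy/clear pair above is
 * meant to bracket open state updates, matching the usage in nfs4_open()
 * below.  Exactly one clear_busy must follow each successful set_busy.
 */
#if 0
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
		return (error);
	/* ... examine and/or update np->n_openflags and delegation state ... */
	nfs_open_state_clear_busy(np);
#endif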
1818
1819 /*
1820 * Search a mount's open owner list for the owner for this credential.
1821 * If not found and "alloc" is set, then allocate a new one.
1822 */
1823 struct nfs_open_owner *
1824 nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1825 {
1826 uid_t uid = kauth_cred_getuid(cred);
1827 struct nfs_open_owner *noop, *newnoop = NULL;
1828
1829 tryagain:
1830 lck_mtx_lock(&nmp->nm_lock);
1831 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1832 if (kauth_cred_getuid(noop->noo_cred) == uid)
1833 break;
1834 }
1835
1836 if (!noop && !newnoop && alloc) {
1837 lck_mtx_unlock(&nmp->nm_lock);
1838 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1839 if (!newnoop)
1840 return (NULL);
1841 bzero(newnoop, sizeof(*newnoop));
1842 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1843 newnoop->noo_mount = nmp;
1844 kauth_cred_ref(cred);
1845 newnoop->noo_cred = cred;
1846 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1847 TAILQ_INIT(&newnoop->noo_opens);
1848 goto tryagain;
1849 }
1850 if (!noop && newnoop) {
1851 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1852 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1853 noop = newnoop;
1854 }
1855 lck_mtx_unlock(&nmp->nm_lock);
1856
1857 if (newnoop && (noop != newnoop))
1858 nfs_open_owner_destroy(newnoop);
1859
1860 if (noop)
1861 nfs_open_owner_ref(noop);
1862
1863 return (noop);
1864 }
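/*
 * Illustrative sketch (not compiled): the open owner lifecycle.
 * nfs_open_owner_find() returns a referenced owner, so every successful
 * lookup must eventually be balanced by nfs_open_owner_rele().
 */
#if 0
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1 /* alloc */);
	if (!noop)
		return (ENOMEM);
	/* ... use noop for open processing ... */
	nfs_open_owner_rele(noop);
#endif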
1865
1866 /*
1867 * destroy an open owner that's no longer needed
1868 */
1869 void
1870 nfs_open_owner_destroy(struct nfs_open_owner *noop)
1871 {
1872 if (noop->noo_cred)
1873 kauth_cred_unref(&noop->noo_cred);
1874 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1875 FREE(noop, M_TEMP);
1876 }
1877
1878 /*
1879 * acquire a reference count on an open owner
1880 */
1881 void
1882 nfs_open_owner_ref(struct nfs_open_owner *noop)
1883 {
1884 lck_mtx_lock(&noop->noo_lock);
1885 noop->noo_refcnt++;
1886 lck_mtx_unlock(&noop->noo_lock);
1887 }
1888
1889 /*
1890 * drop a reference count on an open owner and destroy it if
1891 * it is no longer referenced and no longer on the mount's list.
1892 */
1893 void
1894 nfs_open_owner_rele(struct nfs_open_owner *noop)
1895 {
1896 lck_mtx_lock(&noop->noo_lock);
1897 if (noop->noo_refcnt < 1)
1898 panic("nfs_open_owner_rele: no refcnt");
1899 noop->noo_refcnt--;
1900 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1901 panic("nfs_open_owner_rele: busy");
1902 /* XXX we may potentially want to clean up idle/unused open owner structures */
1903 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1904 lck_mtx_unlock(&noop->noo_lock);
1905 return;
1906 }
1907 /* owner is no longer referenced or linked to mount, so destroy it */
1908 lck_mtx_unlock(&noop->noo_lock);
1909 nfs_open_owner_destroy(noop);
1910 }
1911
1912 /*
1913 * Mark an open owner as busy because we are about to
1914 * start an operation that uses and updates open owner state.
1915 */
1916 int
1917 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1918 {
1919 struct nfsmount *nmp;
1920 struct timespec ts = {2, 0};
1921 int error = 0, slpflag;
1922
1923 nmp = noop->noo_mount;
1924 if (nfs_mount_gone(nmp))
1925 return (ENXIO);
1926 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1927
1928 lck_mtx_lock(&noop->noo_lock);
1929 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1930 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1931 break;
1932 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1933 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
1934 slpflag = 0;
1935 }
1936 if (!error)
1937 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1938 lck_mtx_unlock(&noop->noo_lock);
1939
1940 return (error);
1941 }
1942
1943 /*
1944 * Clear the busy flag on an open owner and wake up anyone waiting
1945 * to mark it busy.
1946 */
1947 void
1948 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1949 {
1950 int wanted;
1951
1952 lck_mtx_lock(&noop->noo_lock);
1953 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1954 panic("nfs_open_owner_clear_busy");
1955 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1956 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1957 lck_mtx_unlock(&noop->noo_lock);
1958 if (wanted)
1959 wakeup(noop);
1960 }
1961
1962 /*
1963 * Given an open/lock owner and an error code, increment the
1964 * sequence ID if appropriate.
1965 */
1966 void
1967 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1968 {
1969 switch (error) {
1970 case NFSERR_STALE_CLIENTID:
1971 case NFSERR_STALE_STATEID:
1972 case NFSERR_OLD_STATEID:
1973 case NFSERR_BAD_STATEID:
1974 case NFSERR_BAD_SEQID:
1975 case NFSERR_BADXDR:
1976 case NFSERR_RESOURCE:
1977 case NFSERR_NOFILEHANDLE:
1978 /* do not increment the open seqid on these errors */
1979 return;
1980 }
1981 if (noop)
1982 noop->noo_seqid++;
1983 if (nlop)
1984 nlop->nlo_seqid++;
1985 }
1986
1987 /*
1988 * Search a node's open file list for any conflicts with this request.
1989 * Also find this open owner's open file structure.
1990 * If not found and "alloc" is set, then allocate one.
1991 */
1992 int
1993 nfs_open_file_find(
1994 nfsnode_t np,
1995 struct nfs_open_owner *noop,
1996 struct nfs_open_file **nofpp,
1997 uint32_t accessMode,
1998 uint32_t denyMode,
1999 int alloc)
2000 {
2001 *nofpp = NULL;
2002 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2003 }
2004
2005 /*
2006 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2007 * if an existing one is not found. This is used in "create" scenarios to
2008 * officially add the provisional nofp to the node once the node is created.
2009 */
2010 int
2011 nfs_open_file_find_internal(
2012 nfsnode_t np,
2013 struct nfs_open_owner *noop,
2014 struct nfs_open_file **nofpp,
2015 uint32_t accessMode,
2016 uint32_t denyMode,
2017 int alloc)
2018 {
2019 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2020
2021 if (!np)
2022 goto alloc;
2023 tryagain:
2024 lck_mtx_lock(&np->n_openlock);
2025 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2026 if (nofp2->nof_owner == noop) {
2027 nofp = nofp2;
2028 if (!accessMode)
2029 break;
2030 }
2031 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2032 /* This request conflicts with an existing open on this client. */
2033 lck_mtx_unlock(&np->n_openlock);
2034 return (EACCES);
2035 }
2036 }
2037
2038 /*
2039 * If this open owner doesn't have an open
2040 * file structure yet, we create one for it.
2041 */
2042 if (!nofp && !*nofpp && !newnofp && alloc) {
2043 lck_mtx_unlock(&np->n_openlock);
2044 alloc:
2045 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2046 if (!newnofp)
2047 return (ENOMEM);
2048 bzero(newnofp, sizeof(*newnofp));
2049 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2050 newnofp->nof_owner = noop;
2051 nfs_open_owner_ref(noop);
2052 newnofp->nof_np = np;
2053 lck_mtx_lock(&noop->noo_lock);
2054 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2055 lck_mtx_unlock(&noop->noo_lock);
2056 if (np)
2057 goto tryagain;
2058 }
2059 if (!nofp) {
2060 if (*nofpp) {
2061 (*nofpp)->nof_np = np;
2062 nofp = *nofpp;
2063 } else {
2064 nofp = newnofp;
2065 }
2066 if (nofp && np)
2067 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2068 }
2069 if (np)
2070 lck_mtx_unlock(&np->n_openlock);
2071
2072 if (alloc && newnofp && (nofp != newnofp))
2073 nfs_open_file_destroy(newnofp);
2074
2075 *nofpp = nofp;
2076 return (nofp ? 0 : ESRCH);
2077 }
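/*
 * Illustrative sketch (not compiled): a typical lookup through
 * nfs_open_file_find().  Passing zero access/deny modes skips the
 * local share conflict check; passing the modes of a new open makes
 * the call fail with EACCES if the open would conflict with another
 * open on this client.
 */
#if 0
	error = nfs_open_file_find(np, noop, &nofp, accessMode, denyMode, 1 /* alloc */);
	if (error == EACCES)
		goto out;	/* conflicts with an existing open on this client */
	if (!error)
		error = nfs_open_file_set_busy(nofp, NULL);
#endif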
2078
2079 /*
2080 * Destroy an open file structure.
2081 */
2082 void
2083 nfs_open_file_destroy(struct nfs_open_file *nofp)
2084 {
2085 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2086 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2087 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2088 nfs_open_owner_rele(nofp->nof_owner);
2089 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2090 FREE(nofp, M_TEMP);
2091 }
2092
2093 /*
2094 * Mark an open file as busy because we are about to
2095 * start an operation that uses and updates open file state.
2096 */
2097 int
2098 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2099 {
2100 struct nfsmount *nmp;
2101 struct timespec ts = {2, 0};
2102 int error = 0, slpflag;
2103
2104 nmp = nofp->nof_owner->noo_mount;
2105 if (nfs_mount_gone(nmp))
2106 return (ENXIO);
2107 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2108
2109 lck_mtx_lock(&nofp->nof_lock);
2110 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2111 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2112 break;
2113 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2114 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2115 slpflag = 0;
2116 }
2117 if (!error)
2118 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2119 lck_mtx_unlock(&nofp->nof_lock);
2120
2121 return (error);
2122 }
2123
2124 /*
2125 * Clear the busy flag on an open file and wake up anyone waiting
2126 * to mark it busy.
2127 */
2128 void
2129 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2130 {
2131 int wanted;
2132
2133 lck_mtx_lock(&nofp->nof_lock);
2134 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
2135 panic("nfs_open_file_clear_busy");
2136 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2137 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
2138 lck_mtx_unlock(&nofp->nof_lock);
2139 if (wanted)
2140 wakeup(nofp);
2141 }
2142
2143 /*
2144 * Add the open state for the given access/deny modes to this open file.
2145 */
2146 void
2147 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2148 {
2149 lck_mtx_lock(&nofp->nof_lock);
2150 nofp->nof_access |= accessMode;
2151 nofp->nof_deny |= denyMode;
2152
2153 if (delegated) {
2154 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2155 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2156 nofp->nof_d_r++;
2157 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2158 nofp->nof_d_w++;
2159 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2160 nofp->nof_d_rw++;
2161 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2162 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2163 nofp->nof_d_r_dw++;
2164 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2165 nofp->nof_d_w_dw++;
2166 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2167 nofp->nof_d_rw_dw++;
2168 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2169 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2170 nofp->nof_d_r_drw++;
2171 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2172 nofp->nof_d_w_drw++;
2173 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2174 nofp->nof_d_rw_drw++;
2175 }
2176 } else {
2177 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2178 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2179 nofp->nof_r++;
2180 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2181 nofp->nof_w++;
2182 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2183 nofp->nof_rw++;
2184 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2185 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2186 nofp->nof_r_dw++;
2187 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2188 nofp->nof_w_dw++;
2189 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2190 nofp->nof_rw_dw++;
2191 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2192 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2193 nofp->nof_r_drw++;
2194 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2195 nofp->nof_w_drw++;
2196 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2197 nofp->nof_rw_drw++;
2198 }
2199 }
2200
2201 nofp->nof_opencnt++;
2202 lck_mtx_unlock(&nofp->nof_lock);
2203 }
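/*
 * Illustrative mapping (comment only) for the counters updated above:
 * the nof_* names encode the access mode first and the deny mode
 * second, with a d_ prefix for delegated opens.  For example:
 *
 *	access READ,  deny NONE,  delegated	-> nof_d_r
 *	access WRITE, deny WRITE, not delegated	-> nof_w_dw
 *	access BOTH,  deny BOTH,  delegated	-> nof_d_rw_drw
 */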
2204
2205 /*
2206 * Find which particular open combo will be closed and report what
2207 * the new modes will be and whether the open was delegated.
2208 */
2209 void
2210 nfs_open_file_remove_open_find(
2211 struct nfs_open_file *nofp,
2212 uint32_t accessMode,
2213 uint32_t denyMode,
2214 uint32_t *newAccessMode,
2215 uint32_t *newDenyMode,
2216 int *delegated)
2217 {
2218 /*
2219 * Calculate new modes: a mode bit is removed when the sum of all
2220 * counters that include that mode is exactly one (i.e. this open is the last one using it)
2221 */
2222 *newAccessMode = nofp->nof_access;
2223 *newDenyMode = nofp->nof_deny;
2224
2225 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2226 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2227 ((nofp->nof_r + nofp->nof_d_r +
2228 nofp->nof_rw + nofp->nof_d_rw +
2229 nofp->nof_r_dw + nofp->nof_d_r_dw +
2230 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2231 nofp->nof_r_drw + nofp->nof_d_r_drw +
2232 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2233 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2234 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2235 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2236 ((nofp->nof_w + nofp->nof_d_w +
2237 nofp->nof_rw + nofp->nof_d_rw +
2238 nofp->nof_w_dw + nofp->nof_d_w_dw +
2239 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2240 nofp->nof_w_drw + nofp->nof_d_w_drw +
2241 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2242 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2243 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2244 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2245 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2246 nofp->nof_w_drw + nofp->nof_d_w_drw +
2247 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2248 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2249 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2250 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2251 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2252 nofp->nof_w_drw + nofp->nof_d_w_drw +
2253 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2254 nofp->nof_r_dw + nofp->nof_d_r_dw +
2255 nofp->nof_w_dw + nofp->nof_d_w_dw +
2256 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2257 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2258
2259 /* Find the corresponding open access/deny mode counter. */
2260 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2261 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2262 *delegated = (nofp->nof_d_r != 0);
2263 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2264 *delegated = (nofp->nof_d_w != 0);
2265 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2266 *delegated = (nofp->nof_d_rw != 0);
2267 else
2268 *delegated = 0;
2269 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2270 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2271 *delegated = (nofp->nof_d_r_dw != 0);
2272 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2273 *delegated = (nofp->nof_d_w_dw != 0);
2274 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2275 *delegated = (nofp->nof_d_rw_dw != 0);
2276 else
2277 *delegated = 0;
2278 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2279 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2280 *delegated = (nofp->nof_d_r_drw != 0);
2281 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2282 *delegated = (nofp->nof_d_w_drw != 0);
2283 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2284 *delegated = (nofp->nof_d_rw_drw != 0);
2285 else
2286 *delegated = 0;
2287 }
2288 }
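/*
 * Illustrative worked example (comment only): if the only remaining open
 * is a single non-delegated READ/DENY_NONE open (nof_r == 1, all other
 * counters zero), then removing a READ/DENY_NONE open makes the read
 * access sum exactly one, so NFS_OPEN_SHARE_ACCESS_READ is cleared from
 * *newAccessMode and *delegated is reported as zero.
 */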
2289
2290 /*
2291 * Remove the open state for the given access/deny modes from this open file.
2292 */
2293 void
2294 nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2295 {
2296 uint32_t newAccessMode, newDenyMode;
2297 int delegated = 0;
2298
2299 lck_mtx_lock(&nofp->nof_lock);
2300 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2301
2302 /* Decrement the corresponding open access/deny mode counter. */
2303 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2304 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2305 if (delegated) {
2306 if (nofp->nof_d_r == 0)
2307 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2308 else
2309 nofp->nof_d_r--;
2310 } else {
2311 if (nofp->nof_r == 0)
2312 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2313 else
2314 nofp->nof_r--;
2315 }
2316 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2317 if (delegated) {
2318 if (nofp->nof_d_w == 0)
2319 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2320 else
2321 nofp->nof_d_w--;
2322 } else {
2323 if (nofp->nof_w == 0)
2324 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2325 else
2326 nofp->nof_w--;
2327 }
2328 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2329 if (delegated) {
2330 if (nofp->nof_d_rw == 0)
2331 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2332 else
2333 nofp->nof_d_rw--;
2334 } else {
2335 if (nofp->nof_rw == 0)
2336 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2337 else
2338 nofp->nof_rw--;
2339 }
2340 }
2341 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2342 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2343 if (delegated) {
2344 if (nofp->nof_d_r_dw == 0)
2345 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2346 else
2347 nofp->nof_d_r_dw--;
2348 } else {
2349 if (nofp->nof_r_dw == 0)
2350 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2351 else
2352 nofp->nof_r_dw--;
2353 }
2354 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2355 if (delegated) {
2356 if (nofp->nof_d_w_dw == 0)
2357 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2358 else
2359 nofp->nof_d_w_dw--;
2360 } else {
2361 if (nofp->nof_w_dw == 0)
2362 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2363 else
2364 nofp->nof_w_dw--;
2365 }
2366 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2367 if (delegated) {
2368 if (nofp->nof_d_rw_dw == 0)
2369 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2370 else
2371 nofp->nof_d_rw_dw--;
2372 } else {
2373 if (nofp->nof_rw_dw == 0)
2374 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2375 else
2376 nofp->nof_rw_dw--;
2377 }
2378 }
2379 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2380 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2381 if (delegated) {
2382 if (nofp->nof_d_r_drw == 0)
2383 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2384 else
2385 nofp->nof_d_r_drw--;
2386 } else {
2387 if (nofp->nof_r_drw == 0)
2388 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2389 else
2390 nofp->nof_r_drw--;
2391 }
2392 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2393 if (delegated) {
2394 if (nofp->nof_d_w_drw == 0)
2395 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2396 else
2397 nofp->nof_d_w_drw--;
2398 } else {
2399 if (nofp->nof_w_drw == 0)
2400 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2401 else
2402 nofp->nof_w_drw--;
2403 }
2404 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2405 if (delegated) {
2406 if (nofp->nof_d_rw_drw == 0)
2407 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2408 else
2409 nofp->nof_d_rw_drw--;
2410 } else {
2411 if (nofp->nof_rw_drw == 0)
2412 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2413 else
2414 nofp->nof_rw_drw--;
2415 }
2416 }
2417 }
2418
2419 /* update the modes */
2420 nofp->nof_access = newAccessMode;
2421 nofp->nof_deny = newDenyMode;
2422 nofp->nof_opencnt--;
2423 lck_mtx_unlock(&nofp->nof_lock);
2424 }
2425
2426
2427 /*
2428 * Get the current (delegation, lock, open, default) stateid for this node.
2429 * If the node has a delegation, use that stateid.
2430 * If the pid holds a lock, use the lock owner's stateid.
2431 * Otherwise use the open file's stateid.
2432 * If no open file, use a default stateid of all ones.
2433 */
2434 void
2435 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2436 {
2437 struct nfsmount *nmp = NFSTONMP(np);
2438 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2439 struct nfs_open_owner *noop = NULL;
2440 struct nfs_open_file *nofp = NULL;
2441 struct nfs_lock_owner *nlop = NULL;
2442 nfs_stateid *s = NULL;
2443
2444 if (np->n_openflags & N_DELEG_MASK) {
2445 s = &np->n_dstateid;
2446 } else {
2447 if (p)
2448 nlop = nfs_lock_owner_find(np, p, 0);
2449 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2450 /* we hold locks, use lock stateid */
2451 s = &nlop->nlo_stateid;
2452 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2453 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2454 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2455 nofp->nof_access) {
2456 /* we (should) have the file open, use open stateid */
2457 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
2458 nfs4_reopen(nofp, thd);
2459 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2460 s = &nofp->nof_stateid;
2461 }
2462 }
2463
2464 if (s) {
2465 sid->seqid = s->seqid;
2466 sid->other[0] = s->other[0];
2467 sid->other[1] = s->other[1];
2468 sid->other[2] = s->other[2];
2469 } else {
2470 /* named attributes may not have a stateid for reads, so don't complain for them */
2471 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
2472 NP(np, "nfs_get_stateid: no stateid");
2473 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2474 }
2475 if (nlop)
2476 nfs_lock_owner_rele(nlop);
2477 if (noop)
2478 nfs_open_owner_rele(noop);
2479 }
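/*
 * Illustrative precedence summary (comment only) for nfs_get_stateid()
 * above, from most to least specific:
 *
 *	1. delegation stateid	(np->n_dstateid)	node holds a delegation
 *	2. lock stateid		(nlop->nlo_stateid)	this pid holds locks
 *	3. open stateid		(nofp->nof_stateid)	file is open for this owner
 *	4. all-ones stateid				fallback special stateid
 */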
2480
2481
2482 /*
2483 * When we have a delegation, we may be able to perform the OPEN locally.
2484 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2485 */
2486 int
2487 nfs4_open_delegated(
2488 nfsnode_t np,
2489 struct nfs_open_file *nofp,
2490 uint32_t accessMode,
2491 uint32_t denyMode,
2492 vfs_context_t ctx)
2493 {
2494 int error = 0, ismember, readtoo = 0, authorized = 0;
2495 uint32_t action;
2496 struct kauth_acl_eval eval;
2497 kauth_cred_t cred = vfs_context_ucred(ctx);
2498
2499 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2500 /*
2501 * Try to open it for read access too,
2502 * so the buffer cache can read data.
2503 */
2504 readtoo = 1;
2505 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2506 }
2507
2508 tryagain:
2509 action = 0;
2510 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ)
2511 action |= KAUTH_VNODE_READ_DATA;
2512 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE)
2513 action |= KAUTH_VNODE_WRITE_DATA;
2514
2515 /* evaluate ACE (if we have one) */
2516 if (np->n_dace.ace_flags) {
2517 eval.ae_requested = action;
2518 eval.ae_acl = &np->n_dace;
2519 eval.ae_count = 1;
2520 eval.ae_options = 0;
2521 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred))
2522 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2523 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2524 if (!error && ismember)
2525 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2526
2527 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2528 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2529 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2530 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2531
2532 error = kauth_acl_evaluate(cred, &eval);
2533
2534 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW))
2535 authorized = 1;
2536 }
2537
2538 if (!authorized) {
2539 /* need to ask the server via ACCESS */
2540 struct vnop_access_args naa;
2541 naa.a_desc = &vnop_access_desc;
2542 naa.a_vp = NFSTOV(np);
2543 naa.a_action = action;
2544 naa.a_context = ctx;
2545 if (!(error = nfs_vnop_access(&naa)))
2546 authorized = 1;
2547 }
2548
2549 if (!authorized) {
2550 if (readtoo) {
2551 /* try again without the extra read access */
2552 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2553 readtoo = 0;
2554 goto tryagain;
2555 }
2556 return (error ? error : EACCES);
2557 }
2558
2559 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2560
2561 return (0);
2562 }
2563
2564
2565 /*
2566 * Open a file with the given access/deny modes.
2567 *
2568 * If we have a delegation, we may be able to handle the open locally.
2569 * Otherwise, we will always send the open RPC even if this open's mode is
2570 * a subset of all the existing opens. This makes sure that we will always
2571 * be able to do a downgrade to any of the open modes.
2572 *
2573 * Note: local conflicts should have already been checked in nfs_open_file_find().
2574 */
2575 int
2576 nfs4_open(
2577 nfsnode_t np,
2578 struct nfs_open_file *nofp,
2579 uint32_t accessMode,
2580 uint32_t denyMode,
2581 vfs_context_t ctx)
2582 {
2583 vnode_t vp = NFSTOV(np);
2584 vnode_t dvp = NULL;
2585 struct componentname cn;
2586 const char *vname = NULL;
2587 size_t namelen;
2588 char smallname[128];
2589 char *filename = NULL;
2590 int error = 0, readtoo = 0;
2591
2592 /*
2593 * We can handle the OPEN ourselves if we have a delegation,
2594 * unless it's a read delegation and the open is asking for
2595 * either write access or deny read. We also don't bother to
2596 * use the delegation if it's being returned.
2597 */
2598 if (np->n_openflags & N_DELEG_MASK) {
2599 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
2600 return (error);
2601 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2602 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2603 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2604 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2605 nfs_open_state_clear_busy(np);
2606 return (error);
2607 }
2608 nfs_open_state_clear_busy(np);
2609 }
2610
2611 /*
2612 * [sigh] We can't trust VFS to get the parent right for named
2613 * attribute nodes. (It likes to reparent the nodes after we've
2614 * created them.) Luckily we can probably get the right parent
2615 * from the n_parent we have stashed away.
2616 */
2617 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2618 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
2619 dvp = NULL;
2620 if (!dvp)
2621 dvp = vnode_getparent(vp);
2622 vname = vnode_getname(vp);
2623 if (!dvp || !vname) {
2624 if (!error)
2625 error = EIO;
2626 goto out;
2627 }
2628 filename = &smallname[0];
2629 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2630 if (namelen >= sizeof(smallname)) {
2631 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
2632 if (!filename) {
2633 error = ENOMEM;
2634 goto out;
2635 }
2636 snprintf(filename, namelen+1, "%s", vname);
2637 }
2638 bzero(&cn, sizeof(cn));
2639 cn.cn_nameptr = filename;
2640 cn.cn_namelen = namelen;
2641
2642 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2643 /*
2644 * Try to open it for read access too,
2645 * so the buffer cache can read data.
2646 */
2647 readtoo = 1;
2648 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2649 }
2650 tryagain:
2651 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2652 if (error) {
2653 if (!nfs_mount_state_error_should_restart(error) &&
2654 (error != EINTR) && (error != ERESTART) && readtoo) {
2655 /* try again without the extra read access */
2656 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2657 readtoo = 0;
2658 goto tryagain;
2659 }
2660 goto out;
2661 }
2662 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2663 out:
2664 if (filename && (filename != &smallname[0]))
2665 FREE(filename, M_TEMP);
2666 if (vname)
2667 vnode_putname(vname);
2668 if (dvp != NULLVP)
2669 vnode_put(dvp);
2670 return (error);
2671 }
2672
2673 int
2674 nfs_vnop_mmap(
2675 struct vnop_mmap_args /* {
2676 struct vnodeop_desc *a_desc;
2677 vnode_t a_vp;
2678 int a_fflags;
2679 vfs_context_t a_context;
2680 } */ *ap)
2681 {
2682 vfs_context_t ctx = ap->a_context;
2683 vnode_t vp = ap->a_vp;
2684 nfsnode_t np = VTONFS(vp);
2685 int error = 0, accessMode, denyMode, delegated;
2686 struct nfsmount *nmp;
2687 struct nfs_open_owner *noop = NULL;
2688 struct nfs_open_file *nofp = NULL;
2689
2690 nmp = VTONMP(vp);
2691 if (nfs_mount_gone(nmp))
2692 return (ENXIO);
2693
2694 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
2695 return (EINVAL);
2696 if (np->n_flag & NREVOKE)
2697 return (EIO);
2698
2699 /*
2700 * fflags contains some combination of: PROT_READ, PROT_WRITE
2701 * Since it's not possible to mmap() without having the file open for reading,
2702 * read access is always there (regardless of whether PROT_READ is set).
2703 */
2704 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2705 if (ap->a_fflags & PROT_WRITE)
2706 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2707 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2708
2709 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2710 if (!noop)
2711 return (ENOMEM);
2712
2713 restart:
2714 error = nfs_mount_state_in_use_start(nmp, NULL);
2715 if (error) {
2716 nfs_open_owner_rele(noop);
2717 return (error);
2718 }
2719 if (np->n_flag & NREVOKE) {
2720 error = EIO;
2721 nfs_mount_state_in_use_end(nmp, 0);
2722 nfs_open_owner_rele(noop);
2723 return (error);
2724 }
2725
2726 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2727 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2728 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2729 error = EPERM;
2730 }
2731 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2732 nfs_mount_state_in_use_end(nmp, 0);
2733 error = nfs4_reopen(nofp, NULL);
2734 nofp = NULL;
2735 if (!error)
2736 goto restart;
2737 }
2738 if (!error)
2739 error = nfs_open_file_set_busy(nofp, NULL);
2740 if (error) {
2741 nofp = NULL;
2742 goto out;
2743 }
2744
2745 /*
2746 * The open reference for mmap must mirror an existing open because
2747 * we may need to reclaim it after the file is closed.
2748 * So grab another open count matching the accessMode passed in.
2749 * If we already had an mmap open, prefer read/write without deny mode.
2750 * This means we may have to drop the current mmap open first.
2751 *
2752 * N.B. We should have an open for the mmap, because mmap was either
2753 * called on an open descriptor, or we created an open for read when
2754 * reading the first page for execve. However, if we piggybacked on an
2755 * existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE open,
2756 * that open may have closed.
2757 */
2758
2759 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2760 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2761 /* We shouldn't get here. We've already opened the file for execve */
2762 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2763 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2764 }
2765 /*
2766 * mmappings for execve are read-only. Get out with EPERM if the accessMode is not ACCESS_READ
2767 * or the access would be denied. Other access modes should have an open descriptor for the mapping.
2768 */
2769 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2770 /* not asking for just read access -> fail */
2771 error = EPERM;
2772 goto out;
2773 }
2774 /* we don't have the file open, so open it for read access */
2775 if (nmp->nm_vers < NFS_VER4) {
2776 /* NFS v2/v3 opens are always allowed - so just add it. */
2777 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
2778 error = 0;
2779 } else {
2780 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
2781 }
2782 if (!error)
2783 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
2784 if (error)
2785 goto out;
2786 }
2787
2788 /* determine deny mode for open */
2789 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2790 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2791 delegated = 1;
2792 if (nofp->nof_d_rw)
2793 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2794 else if (nofp->nof_d_rw_dw)
2795 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2796 else if (nofp->nof_d_rw_drw)
2797 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2798 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2799 delegated = 0;
2800 if (nofp->nof_rw)
2801 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2802 else if (nofp->nof_rw_dw)
2803 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2804 else if (nofp->nof_rw_drw)
2805 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2806 } else {
2807 error = EPERM;
2808 }
2809 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2810 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
2811 delegated = 1;
2812 if (nofp->nof_d_r)
2813 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2814 else if (nofp->nof_d_r_dw)
2815 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2816 else if (nofp->nof_d_r_drw)
2817 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2818 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
2819 delegated = 0;
2820 if (nofp->nof_r)
2821 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2822 else if (nofp->nof_r_dw)
2823 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2824 else if (nofp->nof_r_drw)
2825 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2826 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2827 /*
2828 * This clause and the one below co-opt a read/write access
2829 * for a read-only mmapping. We probably got here because an
2830 * existing rw open for an executable file already exists.
2831 */
2832 delegated = 1;
2833 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2834 if (nofp->nof_d_rw)
2835 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2836 else if (nofp->nof_d_rw_dw)
2837 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2838 else if (nofp->nof_d_rw_drw)
2839 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2840 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2841 delegated = 0;
2842 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2843 if (nofp->nof_rw)
2844 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2845 else if (nofp->nof_rw_dw)
2846 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2847 else if (nofp->nof_rw_drw)
2848 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2849 } else {
2850 error = EPERM;
2851 }
2852 }
2853 if (error) /* mmap mode without proper open mode */
2854 goto out;
2855
2856 /*
2857 * If the existing mmap access is more than the new access OR the
2858 * existing access is the same and the existing deny mode is no greater,
2859 * then we'll stick with the existing mmap open mode.
2860 */
2861 if ((nofp->nof_mmap_access > accessMode) ||
2862 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
2863 goto out;
2864
2865 /* update mmap open mode */
2866 if (nofp->nof_mmap_access) {
2867 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2868 if (error) {
2869 if (!nfs_mount_state_error_should_restart(error))
2870 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2871 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2872 goto out;
2873 }
2874 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2875 }
2876
2877 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
2878 nofp->nof_mmap_access = accessMode;
2879 nofp->nof_mmap_deny = denyMode;
2880
2881 out:
2882 if (nofp)
2883 nfs_open_file_clear_busy(nofp);
2884 if (nfs_mount_state_in_use_end(nmp, error)) {
2885 nofp = NULL;
2886 goto restart;
2887 }
2888 if (noop)
2889 nfs_open_owner_rele(noop);
2890
2891 if (!error) {
2892 int ismapped = 0;
2893 nfs_node_lock_force(np);
2894 if ((np->n_flag & NISMAPPED) == 0) {
2895 np->n_flag |= NISMAPPED;
2896 ismapped = 1;
2897 }
2898 nfs_node_unlock(np);
2899 if (ismapped) {
2900 lck_mtx_lock(&nmp->nm_lock);
2901 nmp->nm_state &= ~NFSSTA_SQUISHY;
2902 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
2903 if (nmp->nm_curdeadtimeout <= 0)
2904 nmp->nm_deadto_start = 0;
2905 nmp->nm_mappers++;
2906 lck_mtx_unlock(&nmp->nm_lock);
2907 }
2908 }
2909
2910 return (error);
2911 }
2912
2913
2914 int
2915 nfs_vnop_mnomap(
2916 struct vnop_mnomap_args /* {
2917 struct vnodeop_desc *a_desc;
2918 vnode_t a_vp;
2919 vfs_context_t a_context;
2920 } */ *ap)
2921 {
2922 vfs_context_t ctx = ap->a_context;
2923 vnode_t vp = ap->a_vp;
2924 nfsnode_t np = VTONFS(vp);
2925 struct nfsmount *nmp;
2926 struct nfs_open_file *nofp = NULL;
2927 off_t size;
2928 int error;
2929 int is_mapped_flag = 0;
2930
2931 nmp = VTONMP(vp);
2932 if (nfs_mount_gone(nmp))
2933 return (ENXIO);
2934
2935 nfs_node_lock_force(np);
2936 if (np->n_flag & NISMAPPED) {
2937 is_mapped_flag = 1;
2938 np->n_flag &= ~NISMAPPED;
2939 }
2940 nfs_node_unlock(np);
2941 if (is_mapped_flag) {
2942 lck_mtx_lock(&nmp->nm_lock);
2943 if (nmp->nm_mappers)
2944 nmp->nm_mappers--;
2945 else
2946 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2947 lck_mtx_unlock(&nmp->nm_lock);
2948 }
2949
2950 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2951 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
2952 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
2953 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
2954
2955 /* walk all open files and close all mmap opens */
2956 loop:
2957 error = nfs_mount_state_in_use_start(nmp, NULL);
2958 if (error)
2959 return (error);
2960 lck_mtx_lock(&np->n_openlock);
2961 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
2962 if (!nofp->nof_mmap_access)
2963 continue;
2964 lck_mtx_unlock(&np->n_openlock);
2965 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2966 nfs_mount_state_in_use_end(nmp, 0);
2967 error = nfs4_reopen(nofp, NULL);
2968 if (!error)
2969 goto loop;
2970 }
2971 if (!error)
2972 error = nfs_open_file_set_busy(nofp, NULL);
2973 if (error) {
2974 lck_mtx_lock(&np->n_openlock);
2975 break;
2976 }
2977 if (nofp->nof_mmap_access) {
2978 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2979 if (!nfs_mount_state_error_should_restart(error)) {
2980 if (error) /* not a state-operation-restarting error, so just clear the access */
2981 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2982 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2983 }
2984 if (error)
2985 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2986 }
2987 nfs_open_file_clear_busy(nofp);
2988 nfs_mount_state_in_use_end(nmp, error);
2989 goto loop;
2990 }
2991 lck_mtx_unlock(&np->n_openlock);
2992 nfs_mount_state_in_use_end(nmp, error);
2993 return (error);
2994 }
2995
2996 /*
2997 * Search a node's lock owner list for the owner for this process.
2998 * If not found and "alloc" is set, then allocate a new one.
2999 */
3000 struct nfs_lock_owner *
3001 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3002 {
3003 pid_t pid = proc_pid(p);
3004 struct nfs_lock_owner *nlop, *newnlop = NULL;
3005
3006 tryagain:
3007 lck_mtx_lock(&np->n_openlock);
3008 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3009 if (nlop->nlo_pid != pid)
3010 continue;
3011 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
3012 break;
3013 /* stale lock owner... reuse it if we can */
3014 if (nlop->nlo_refcnt) {
3015 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3016 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3017 lck_mtx_unlock(&np->n_openlock);
3018 goto tryagain;
3019 }
3020 nlop->nlo_pid_start = p->p_start;
3021 nlop->nlo_seqid = 0;
3022 nlop->nlo_stategenid = 0;
3023 break;
3024 }
3025
3026 if (!nlop && !newnlop && alloc) {
3027 lck_mtx_unlock(&np->n_openlock);
3028 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3029 if (!newnlop)
3030 return (NULL);
3031 bzero(newnlop, sizeof(*newnlop));
3032 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3033 newnlop->nlo_pid = pid;
3034 newnlop->nlo_pid_start = p->p_start;
3035 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3036 TAILQ_INIT(&newnlop->nlo_locks);
3037 goto tryagain;
3038 }
3039 if (!nlop && newnlop) {
3040 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3041 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3042 nlop = newnlop;
3043 }
3044 lck_mtx_unlock(&np->n_openlock);
3045
3046 if (newnlop && (nlop != newnlop))
3047 nfs_lock_owner_destroy(newnlop);
3048
3049 if (nlop)
3050 nfs_lock_owner_ref(nlop);
3051
3052 return (nlop);
3053 }
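/*
 * Illustrative note (comment only) on the staleness check above: a lock
 * owner is stale when its pid matches but the recorded process start
 * time differs - the pid has been reused by a new process.  A stale
 * owner's state is reset and the structure reused if it is no longer
 * referenced; otherwise it is unlinked and a fresh owner is allocated.
 */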
3054
3055 /*
3056 * destroy a lock owner that's no longer needed
3057 */
3058 void
3059 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3060 {
3061 if (nlop->nlo_open_owner) {
3062 nfs_open_owner_rele(nlop->nlo_open_owner);
3063 nlop->nlo_open_owner = NULL;
3064 }
3065 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3066 FREE(nlop, M_TEMP);
3067 }
3068
3069 /*
3070 * acquire a reference count on a lock owner
3071 */
3072 void
3073 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3074 {
3075 lck_mtx_lock(&nlop->nlo_lock);
3076 nlop->nlo_refcnt++;
3077 lck_mtx_unlock(&nlop->nlo_lock);
3078 }
3079
3080 /*
3081 * drop a reference count on a lock owner and destroy it if
3082 * it is no longer referenced and no longer on the mount's list.
3083 */
3084 void
3085 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3086 {
3087 lck_mtx_lock(&nlop->nlo_lock);
3088 if (nlop->nlo_refcnt < 1)
3089 panic("nfs_lock_owner_rele: no refcnt");
3090 nlop->nlo_refcnt--;
3091 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3092 panic("nfs_lock_owner_rele: busy");
3093 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3094 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3095 lck_mtx_unlock(&nlop->nlo_lock);
3096 return;
3097 }
3098 /* owner is no longer referenced or linked to mount, so destroy it */
3099 lck_mtx_unlock(&nlop->nlo_lock);
3100 nfs_lock_owner_destroy(nlop);
3101 }
3102
3103 /*
3104 * Mark a lock owner as busy because we are about to
3105 * start an operation that uses and updates lock owner state.
3106 */
3107 int
3108 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3109 {
3110 struct nfsmount *nmp;
3111 struct timespec ts = {2, 0};
3112 int error = 0, slpflag;
3113
3114 nmp = nlop->nlo_open_owner->noo_mount;
3115 if (nfs_mount_gone(nmp))
3116 return (ENXIO);
3117 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3118
3119 lck_mtx_lock(&nlop->nlo_lock);
3120 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3121 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
3122 break;
3123 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3124 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3125 slpflag = 0;
3126 }
3127 if (!error)
3128 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3129 lck_mtx_unlock(&nlop->nlo_lock);
3130
3131 return (error);
3132 }
3133
3134 /*
3135 * Clear the busy flag on a lock owner and wake up anyone waiting
3136 * to mark it busy.
3137 */
3138 void
3139 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3140 {
3141 int wanted;
3142
3143 lck_mtx_lock(&nlop->nlo_lock);
3144 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3145 panic("nfs_lock_owner_clear_busy");
3146 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3147 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
3148 lck_mtx_unlock(&nlop->nlo_lock);
3149 if (wanted)
3150 wakeup(nlop);
3151 }
3152
3153 /*
3154 * Insert a held lock into a lock owner's sorted list.
3155 * (flock locks are always inserted at the head of the list)
3156 */
3157 void
3158 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3159 {
3160 struct nfs_file_lock *nflp;
3161
3162 /* insert new lock in lock owner's held lock list */
3163 lck_mtx_lock(&nlop->nlo_lock);
3164 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3165 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3166 } else {
3167 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3168 if (newnflp->nfl_start < nflp->nfl_start)
3169 break;
3170 }
3171 if (nflp)
3172 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3173 else
3174 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3175 }
3176 lck_mtx_unlock(&nlop->nlo_lock);
3177 }
3178
3179 /*
3180 * Get a file lock structure for this lock owner (the owner's embedded one if free, else a fresh allocation).
3181 */
3182 struct nfs_file_lock *
3183 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3184 {
3185 struct nfs_file_lock *nflp = NULL;
3186
3187 lck_mtx_lock(&nlop->nlo_lock);
3188 if (!nlop->nlo_alock.nfl_owner) {
3189 nflp = &nlop->nlo_alock;
3190 nflp->nfl_owner = nlop;
3191 }
3192 lck_mtx_unlock(&nlop->nlo_lock);
3193 if (!nflp) {
3194 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3195 if (!nflp)
3196 return (NULL);
3197 bzero(nflp, sizeof(*nflp));
3198 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3199 nflp->nfl_owner = nlop;
3200 }
3201 nfs_lock_owner_ref(nlop);
3202 return (nflp);
3203 }
3204
3205 /*
3206 * destroy the given NFS file lock structure
3207 */
3208 void
3209 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3210 {
3211 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3212
3213 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3214 nflp->nfl_owner = NULL;
3215 FREE(nflp, M_TEMP);
3216 } else {
3217 lck_mtx_lock(&nlop->nlo_lock);
3218 bzero(nflp, sizeof(*nflp));
3219 lck_mtx_unlock(&nlop->nlo_lock);
3220 }
3221 nfs_lock_owner_rele(nlop);
3222 }
3223
3224 /*
3225 * Check if one file lock conflicts with another.
3226 * (nflp1 is the new lock. nflp2 is the existing lock.)
3227 */
3228 int
3229 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3230 {
3231 /* no conflict if lock is dead */
3232 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
3233 return (0);
3234 /* no conflict if it's ours - unless the lock style doesn't match */
3235 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3236 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3237 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3238 (nflp1->nfl_start > nflp2->nfl_start) &&
3239 (nflp1->nfl_end < nflp2->nfl_end))
3240 *willsplit = 1;
3241 return (0);
3242 }
3243 /* no conflict if ranges don't overlap */
3244 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
3245 return (0);
3246 /* no conflict if neither lock is exclusive */
3247 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
3248 return (0);
3249 /* conflict */
3250 return (1);
3251 }
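/*
 * Illustrative worked example (not compiled): a new write lock over
 * bytes [100, 199] held by one owner conflicts with another owner's
 * read lock over [150, 250], because the ranges overlap and one of the
 * locks is exclusive.  Two read locks over the same range would not
 * conflict.  The field values below are assumptions for illustration.
 */
#if 0
	/* nlop1/nlop2 are two distinct lock owners (hypothetical) */
	struct nfs_file_lock new_lock = { .nfl_owner = nlop1, .nfl_start = 100, .nfl_end = 199, .nfl_type = F_WRLCK };
	struct nfs_file_lock old_lock = { .nfl_owner = nlop2, .nfl_start = 150, .nfl_end = 250, .nfl_type = F_RDLCK };
	int willsplit = 0, conflict;

	/* ranges overlap and one lock is exclusive -> conflict == 1 */
	conflict = nfs_file_lock_conflict(&new_lock, &old_lock, &willsplit);
#endif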
3252
3253 /*
3254 * Send an NFSv4 LOCK RPC to the server.
3255 */
3256 int
3257 nfs4_setlock_rpc(
3258 nfsnode_t np,
3259 struct nfs_open_file *nofp,
3260 struct nfs_file_lock *nflp,
3261 int reclaim,
3262 int flags,
3263 thread_t thd,
3264 kauth_cred_t cred)
3265 {
3266 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3267 struct nfsmount *nmp;
3268 struct nfsm_chain nmreq, nmrep;
3269 uint64_t xid;
3270 uint32_t locktype;
3271 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3272 struct nfsreq_secinfo_args si;
3273
3274 nmp = NFSTONMP(np);
3275 if (nfs_mount_gone(nmp))
3276 return (ENXIO);
3277 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3278 return (EINVAL);
3279
3280 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3281 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3282 ((nflp->nfl_type == F_WRLCK) ?
3283 NFS_LOCK_TYPE_WRITEW :
3284 NFS_LOCK_TYPE_READW) :
3285 ((nflp->nfl_type == F_WRLCK) ?
3286 NFS_LOCK_TYPE_WRITE :
3287 NFS_LOCK_TYPE_READ);
3288 if (newlocker) {
3289 error = nfs_open_file_set_busy(nofp, thd);
3290 if (error)
3291 return (error);
3292 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3293 if (error) {
3294 nfs_open_file_clear_busy(nofp);
3295 return (error);
3296 }
3297 if (!nlop->nlo_open_owner) {
3298 nfs_open_owner_ref(nofp->nof_owner);
3299 nlop->nlo_open_owner = nofp->nof_owner;
3300 }
3301 }
3302 error = nfs_lock_owner_set_busy(nlop, thd);
3303 if (error) {
3304 if (newlocker) {
3305 nfs_open_owner_clear_busy(nofp->nof_owner);
3306 nfs_open_file_clear_busy(nofp);
3307 }
3308 return (error);
3309 }
3310
3311 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3312 nfsm_chain_null(&nmreq);
3313 nfsm_chain_null(&nmrep);
3314
3315 // PUTFH, GETATTR, LOCK
3316 numops = 3;
3317 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3318 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3319 numops--;
3320 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3321 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3322 numops--;
3323 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3324 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3325 numops--;
3326 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3327 nfsm_chain_add_32(error, &nmreq, locktype);
3328 nfsm_chain_add_32(error, &nmreq, reclaim);
3329 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3330 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3331 nfsm_chain_add_32(error, &nmreq, newlocker);
3332 if (newlocker) {
3333 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3334 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3335 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3336 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3337 } else {
3338 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3339 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3340 }
3341 nfsm_chain_build_done(error, &nmreq);
3342 nfsm_assert(error, (numops == 0), EPROTO);
3343 nfsmout_if(error);
3344
3345 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3346
3347 if ((lockerror = nfs_node_lock(np)))
3348 error = lockerror;
3349 nfsm_chain_skip_tag(error, &nmrep);
3350 nfsm_chain_get_32(error, &nmrep, numops);
3351 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3352 nfsmout_if(error);
3353 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3354 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3355 nfsmout_if(error);
3356 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3357 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3358 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3359
3360 /* Update the lock owner's stategenid once it appears the server has state for it. */
3361 /* We determine this by noting the request was successful (we got a stateid). */
3362 if (newlocker && !error)
3363 nlop->nlo_stategenid = nmp->nm_stategenid;
3364 nfsmout:
3365 if (!lockerror)
3366 nfs_node_unlock(np);
3367 nfs_lock_owner_clear_busy(nlop);
3368 if (newlocker) {
3369 nfs_open_owner_clear_busy(nofp->nof_owner);
3370 nfs_open_file_clear_busy(nofp);
3371 }
3372 nfsm_chain_cleanup(&nmreq);
3373 nfsm_chain_cleanup(&nmrep);
3374 return (error);
3375 }
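
/*
 * For reference, the LOCK arguments marshalled above follow the XDR
 * layout from RFC 7530 (abridged).  The new_lock_owner branch is what
 * lets the server derive a new lock_owner's state from the open
 * owner's stateid and seqid:
 *
 *	struct LOCK4args {
 *		nfs_lock_type4	locktype;
 *		bool		reclaim;
 *		offset4		offset;
 *		length4		length;
 *		locker4		locker;
 *	};
 *	union locker4 switch (bool new_lock_owner) {
 *	case TRUE:
 *		open_to_lock_owner4 open_owner;
 *			// open_seqid, open_stateid, lock_seqid, lock_owner
 *	case FALSE:
 *		exist_lock_owner4 lock_owner;
 *			// lock_stateid, lock_seqid
 *	};
 */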
3376
3377 /*
3378 * Send an NFSv4 LOCKU RPC to the server.
3379 */
3380 int
3381 nfs4_unlock_rpc(
3382 nfsnode_t np,
3383 struct nfs_lock_owner *nlop,
3384 int type,
3385 uint64_t start,
3386 uint64_t end,
3387 int flags,
3388 thread_t thd,
3389 kauth_cred_t cred)
3390 {
3391 struct nfsmount *nmp;
3392 struct nfsm_chain nmreq, nmrep;
3393 uint64_t xid;
3394 int error = 0, lockerror = ENOENT, numops, status;
3395 struct nfsreq_secinfo_args si;
3396
3397 nmp = NFSTONMP(np);
3398 if (nfs_mount_gone(nmp))
3399 return (ENXIO);
3400 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3401 return (EINVAL);
3402
3403 error = nfs_lock_owner_set_busy(nlop, NULL);
3404 if (error)
3405 return (error);
3406
3407 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3408 nfsm_chain_null(&nmreq);
3409 nfsm_chain_null(&nmrep);
3410
3411 // PUTFH, GETATTR, LOCKU
3412 numops = 3;
3413 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3414 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3415 numops--;
3416 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3417 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3418 numops--;
3419 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3420 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3421 numops--;
3422 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3423 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3424 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3425 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3426 nfsm_chain_add_64(error, &nmreq, start);
3427 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3428 nfsm_chain_build_done(error, &nmreq);
3429 nfsm_assert(error, (numops == 0), EPROTO);
3430 nfsmout_if(error);
3431
3432 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3433
3434 if ((lockerror = nfs_node_lock(np)))
3435 error = lockerror;
3436 nfsm_chain_skip_tag(error, &nmrep);
3437 nfsm_chain_get_32(error, &nmrep, numops);
3438 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3439 nfsmout_if(error);
3440 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3441 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3442 nfsmout_if(error);
3443 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3444 nfs_owner_seqid_increment(NULL, nlop, error);
3445 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3446 nfsmout:
3447 if (!lockerror)
3448 nfs_node_unlock(np);
3449 nfs_lock_owner_clear_busy(nlop);
3450 nfsm_chain_cleanup(&nmreq);
3451 nfsm_chain_cleanup(&nmrep);
3452 return (error);
3453 }
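
/*
 * Locks are tracked here as inclusive [start, end] ranges, while LOCK
 * and LOCKU take an (offset, length) pair on the wire.  Presumably
 * NFS_LOCK_LENGTH() performs the conversion sketched below, where a
 * length of UINT64_MAX means "through the end of file" (hypothetical
 * helper name, not part of this file's build):
 */
#if 0
#include <stdint.h>

static uint64_t
wire_lock_length(uint64_t start, uint64_t end)
{
	/* NFSv4 reserves length UINT64_MAX for "lock to EOF" */
	if (end == UINT64_MAX)
		return (UINT64_MAX);
	return (end - start + 1);	/* inclusive range -> byte count */
}
#endif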
3454
3455 /*
3456 * Send an NFSv4 LOCKT RPC to the server.
3457 */
3458 int
3459 nfs4_getlock_rpc(
3460 nfsnode_t np,
3461 struct nfs_lock_owner *nlop,
3462 struct flock *fl,
3463 uint64_t start,
3464 uint64_t end,
3465 vfs_context_t ctx)
3466 {
3467 struct nfsmount *nmp;
3468 struct nfsm_chain nmreq, nmrep;
3469 uint64_t xid, val64 = 0;
3470 uint32_t val = 0;
3471 int error = 0, lockerror, numops, status;
3472 struct nfsreq_secinfo_args si;
3473
3474 nmp = NFSTONMP(np);
3475 if (nfs_mount_gone(nmp))
3476 return (ENXIO);
3477 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3478 return (EINVAL);
3479
3480 lockerror = ENOENT;
3481 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3482 nfsm_chain_null(&nmreq);
3483 nfsm_chain_null(&nmrep);
3484
3485 // PUTFH, GETATTR, LOCKT
3486 numops = 3;
3487 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3488 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3489 numops--;
3490 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3491 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3492 numops--;
3493 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3494 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3495 numops--;
3496 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3497 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3498 nfsm_chain_add_64(error, &nmreq, start);
3499 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3500 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3501 nfsm_chain_build_done(error, &nmreq);
3502 nfsm_assert(error, (numops == 0), EPROTO);
3503 nfsmout_if(error);
3504
3505 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3506
3507 if ((lockerror = nfs_node_lock(np)))
3508 error = lockerror;
3509 nfsm_chain_skip_tag(error, &nmrep);
3510 nfsm_chain_get_32(error, &nmrep, numops);
3511 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3512 nfsmout_if(error);
3513 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3514 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3515 nfsmout_if(error);
3516 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3517 if (error == NFSERR_DENIED) {
3518 error = 0;
3519 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3520 nfsm_chain_get_64(error, &nmrep, val64);
3521 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3522 nfsm_chain_get_32(error, &nmrep, val);
3523 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3524 fl->l_pid = 0;
3525 fl->l_whence = SEEK_SET;
3526 } else if (!error) {
3527 fl->l_type = F_UNLCK;
3528 }
3529 nfsmout:
3530 if (!lockerror)
3531 nfs_node_unlock(np);
3532 nfsm_chain_cleanup(&nmreq);
3533 nfsm_chain_cleanup(&nmrep);
3534 return (error);
3535 }
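
/*
 * What the LOCKT path looks like from user space: F_GETLK either
 * rewrites the flock structure with a conflicting lock or sets l_type
 * to F_UNLCK when the range could be locked.  A small runnable sketch;
 * the mount point and file name are hypothetical, and note that
 * nfs4_getlock_rpc() above reports remote conflicts with l_pid 0:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* "would a write lock succeed?" */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 = through end of file */
	};
	int fd = open("/mnt/nfs/testfile", O_RDWR);	/* hypothetical path */

	if ((fd < 0) || (fcntl(fd, F_GETLK, &fl) == -1)) {
		perror("F_GETLK");
		return (1);
	}
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict: type %d start %lld len %lld pid %d\n",
		    fl.l_type, (long long)fl.l_start,
		    (long long)fl.l_len, (int)fl.l_pid);
	close(fd);
	return (0);
}
#endif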
3536
3537
3538 /*
3539 * Check for any conflicts with the given lock.
3540 *
3541 * Checking for a lock doesn't require the file to be opened.
3542 * So we skip all the open owner, open file, lock owner work
3543 * and just check for a conflicting lock.
3544 */
3545 int
3546 nfs_advlock_getlock(
3547 nfsnode_t np,
3548 struct nfs_lock_owner *nlop,
3549 struct flock *fl,
3550 uint64_t start,
3551 uint64_t end,
3552 vfs_context_t ctx)
3553 {
3554 struct nfsmount *nmp;
3555 struct nfs_file_lock *nflp;
3556 int error = 0, answered = 0;
3557
3558 nmp = NFSTONMP(np);
3559 if (nfs_mount_gone(nmp))
3560 return (ENXIO);
3561
3562 restart:
3563 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx))))
3564 return (error);
3565
3566 lck_mtx_lock(&np->n_openlock);
3567 /* scan currently held locks for conflict */
3568 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3569 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3570 continue;
3571 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3572 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
3573 break;
3574 }
3575 if (nflp) {
3576 /* found a conflicting lock */
3577 fl->l_type = nflp->nfl_type;
3578 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3579 fl->l_start = nflp->nfl_start;
3580 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3581 fl->l_whence = SEEK_SET;
3582 answered = 1;
3583 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3584 /*
3585 * If we have a write delegation, we know there can't be other
3586 * locks on the server. So the answer is no conflicting lock found.
3587 */
3588 fl->l_type = F_UNLCK;
3589 answered = 1;
3590 }
3591 lck_mtx_unlock(&np->n_openlock);
3592 if (answered) {
3593 nfs_mount_state_in_use_end(nmp, 0);
3594 return (0);
3595 }
3596
3597 /* no conflict found locally, so ask the server */
3598 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3599
3600 if (nfs_mount_state_in_use_end(nmp, error))
3601 goto restart;
3602 return (error);
3603 }
3604
3605 /*
3606 * Acquire a file lock for the given range.
3607 *
3608 * Add the lock (request) to the lock queue.
3609 * Scan the lock queue for any conflicting locks.
3610 * If a conflict is found, block or return an error.
3611 * Once end of queue is reached, send request to the server.
3612 * If the server grants the lock, scan the lock queue and
3613 * update any existing locks. Then (optionally) scan the
3614 * queue again to coalesce any locks adjacent to the new one.
3615 */
3616 int
3617 nfs_advlock_setlock(
3618 nfsnode_t np,
3619 struct nfs_open_file *nofp,
3620 struct nfs_lock_owner *nlop,
3621 int op,
3622 uint64_t start,
3623 uint64_t end,
3624 int style,
3625 short type,
3626 vfs_context_t ctx)
3627 {
3628 struct nfsmount *nmp;
3629 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3630 struct nfs_file_lock *coalnflp;
3631 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3632 struct timespec ts = {1, 0};
3633
3634 nmp = NFSTONMP(np);
3635 if (nfs_mount_gone(nmp))
3636 return (ENXIO);
3637 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3638
3639 if ((type != F_RDLCK) && (type != F_WRLCK))
3640 return (EINVAL);
3641
3642 /* allocate a new lock */
3643 newnflp = nfs_file_lock_alloc(nlop);
3644 if (!newnflp)
3645 return (ENOLCK);
3646 newnflp->nfl_start = start;
3647 newnflp->nfl_end = end;
3648 newnflp->nfl_type = type;
3649 if (op == F_SETLKW)
3650 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3651 newnflp->nfl_flags |= style;
3652 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
3653
3654 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3655 /*
3656 * For exclusive flock-style locks, if we block waiting for the
3657 * lock, we need to first release any currently held shared
3658 * flock-style lock. So, the first thing we do is check if we
3659 * have a shared flock-style lock.
3660 */
3661 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3662 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
3663 nflp = NULL;
3664 if (nflp && (nflp->nfl_type != F_RDLCK))
3665 nflp = NULL;
3666 flocknflp = nflp;
3667 }
3668
3669 restart:
3670 restart = 0;
3671 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3672 if (error)
3673 goto error_out;
3674 inuse = 1;
3675 if (np->n_flag & NREVOKE) {
3676 error = EIO;
3677 nfs_mount_state_in_use_end(nmp, 0);
3678 inuse = 0;
3679 goto error_out;
3680 }
3681 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3682 nfs_mount_state_in_use_end(nmp, 0);
3683 inuse = 0;
3684 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3685 if (error)
3686 goto error_out;
3687 goto restart;
3688 }
3689
3690 lck_mtx_lock(&np->n_openlock);
3691 if (!inqueue) {
3692 /* insert new lock at beginning of list */
3693 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3694 inqueue = 1;
3695 }
3696
3697 /* scan current list of locks (held and pending) for conflicts */
3698 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3699 nextnflp = TAILQ_NEXT(nflp, nfl_link);
3700 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
3701 continue;
3702 /* Conflict */
3703 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3704 error = EAGAIN;
3705 break;
3706 }
3707 /* Block until this lock is no longer held. */
3708 if (nflp->nfl_blockcnt == UINT_MAX) {
3709 error = ENOLCK;
3710 break;
3711 }
3712 nflp->nfl_blockcnt++;
3713 do {
3714 if (flocknflp) {
3715 /* release any currently held shared lock before sleeping */
3716 lck_mtx_unlock(&np->n_openlock);
3717 nfs_mount_state_in_use_end(nmp, 0);
3718 inuse = 0;
3719 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3720 flocknflp = NULL;
3721 if (!error)
3722 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3723 if (error) {
3724 lck_mtx_lock(&np->n_openlock);
3725 break;
3726 }
3727 inuse = 1;
3728 lck_mtx_lock(&np->n_openlock);
3729 /* no need to block/sleep if the conflict is gone */
3730 if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
3731 break;
3732 }
3733 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
3734 slpflag = 0;
3735 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3736 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3737 /* looks like we have a recover pending... restart */
3738 restart = 1;
3739 lck_mtx_unlock(&np->n_openlock);
3740 nfs_mount_state_in_use_end(nmp, 0);
3741 inuse = 0;
3742 lck_mtx_lock(&np->n_openlock);
3743 break;
3744 }
3745 if (!error && (np->n_flag & NREVOKE))
3746 error = EIO;
3747 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
3748 nflp->nfl_blockcnt--;
3749 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
3750 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3751 nfs_file_lock_destroy(nflp);
3752 }
3753 if (error || restart)
3754 break;
3755 /* We have released n_openlock, so we can't trust that nextnflp is still valid. */
3756 /* Restart this lock-scanning loop from the beginning. */
3757 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
3758 }
3759 lck_mtx_unlock(&np->n_openlock);
3760 if (restart)
3761 goto restart;
3762 if (error)
3763 goto error_out;
3764
3765 if (willsplit) {
3766 /*
3767 * It looks like this operation is splitting a lock.
3768 * We allocate a new lock now so we don't have to worry
3769 * about the allocation failing after we've updated some state.
3770 */
3771 nflp2 = nfs_file_lock_alloc(nlop);
3772 if (!nflp2) {
3773 error = ENOLCK;
3774 goto error_out;
3775 }
3776 }
3777
3778 /* once the scan for local conflicts comes up clean, send the request to the server */
3779 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
3780 goto error_out;
3781 busy = 1;
3782 delay = 0;
3783 do {
3784 /* do we have a delegation? (that we're not returning?) */
3785 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
3786 if (np->n_openflags & N_DELEG_WRITE) {
3787 /* with a write delegation, just take the lock delegated */
3788 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
3789 error = 0;
3790 /* make sure the lock owner knows its open owner */
3791 if (!nlop->nlo_open_owner) {
3792 nfs_open_owner_ref(nofp->nof_owner);
3793 nlop->nlo_open_owner = nofp->nof_owner;
3794 }
3795 break;
3796 } else {
3797 /*
3798 * If we don't have any non-delegated opens but we do have
3799 * delegated opens, then we need to first claim the delegated
3800 * opens so that the lock request on the server can be associated
3801 * with an open it knows about.
3802 */
3803 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
3804 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
3805 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
3806 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
3807 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
3808 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
3809 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
3810 if (error)
3811 break;
3812 }
3813 }
3814 }
3815 if (np->n_flag & NREVOKE)
3816 error = EIO;
3817 if (!error)
3818 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
3819 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
3820 break;
3821 /* request was denied due to either conflict or grace period */
3822 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3823 error = EAGAIN;
3824 break;
3825 }
3826 if (flocknflp) {
3827 /* release any currently held shared lock before sleeping */
3828 nfs_open_state_clear_busy(np);
3829 busy = 0;
3830 nfs_mount_state_in_use_end(nmp, 0);
3831 inuse = 0;
3832 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3833 flocknflp = NULL;
3834 if (!error2)
3835 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3836 if (!error2) {
3837 inuse = 1;
3838 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
3839 }
3840 if (error2) {
3841 error = error2;
3842 break;
3843 }
3844 busy = 1;
3845 }
3846 /*
3847 * Wait a little bit and send the request again.
3848 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
3849 */
3850 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
3851 if (error == NFSERR_GRACE)
3852 delay = 4;
3853 if (delay < 4)
3854 delay++;
3855 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2));
3856 slpflag = 0;
3857 }
3858 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3859 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3860 /* looks like we have a recover pending... restart */
3861 nfs_open_state_clear_busy(np);
3862 busy = 0;
3863 nfs_mount_state_in_use_end(nmp, 0);
3864 inuse = 0;
3865 goto restart;
3866 }
3867 if (!error && (np->n_flag & NREVOKE))
3868 error = EIO;
3869 } while (!error);
3870
3871 error_out:
3872 if (nfs_mount_state_error_should_restart(error)) {
3873 /* looks like we need to restart this operation */
3874 if (busy) {
3875 nfs_open_state_clear_busy(np);
3876 busy = 0;
3877 }
3878 if (inuse) {
3879 nfs_mount_state_in_use_end(nmp, error);
3880 inuse = 0;
3881 }
3882 goto restart;
3883 }
3884 lck_mtx_lock(&np->n_openlock);
3885 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
3886 if (error) {
3887 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3888 if (newnflp->nfl_blockcnt) {
3889 /* wake up anyone blocked on this lock */
3890 wakeup(newnflp);
3891 } else {
3892 /* remove newnflp from lock list and destroy */
3893 if (inqueue)
3894 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
3895 nfs_file_lock_destroy(newnflp);
3896 }
3897 lck_mtx_unlock(&np->n_openlock);
3898 if (busy)
3899 nfs_open_state_clear_busy(np);
3900 if (inuse)
3901 nfs_mount_state_in_use_end(nmp, error);
3902 if (nflp2)
3903 nfs_file_lock_destroy(nflp2);
3904 return (error);
3905 }
3906
3907 /* server granted the lock */
3908
3909 /*
3910 * Scan for locks to update.
3911 *
3912 * Locks completely covered are killed.
3913 * At most two locks may need to be clipped.
3914 * It's possible that a single lock may need to be split.
3915 */
3916 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3917 if (nflp == newnflp)
3918 continue;
3919 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3920 continue;
3921 if (nflp->nfl_owner != nlop)
3922 continue;
3923 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
3924 continue;
3925 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
3926 continue;
3927 /* here's one to update */
3928 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
3929 /* The entire lock is being replaced. */
3930 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3931 lck_mtx_lock(&nlop->nlo_lock);
3932 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3933 lck_mtx_unlock(&nlop->nlo_lock);
3934 /* lock will be destroyed below, if no waiters */
3935 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
3936 /* We're replacing a range in the middle of a lock. */
3937 /* The current lock will be split into two locks. */
3938 /* Update locks and insert new lock after current lock. */
3939 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
3940 nflp2->nfl_type = nflp->nfl_type;
3941 nflp2->nfl_start = newnflp->nfl_end + 1;
3942 nflp2->nfl_end = nflp->nfl_end;
3943 nflp->nfl_end = newnflp->nfl_start - 1;
3944 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
3945 nfs_lock_owner_insert_held_lock(nlop, nflp2);
3946 nextnflp = nflp2;
3947 nflp2 = NULL;
3948 } else if (newnflp->nfl_start > nflp->nfl_start) {
3949 /* We're replacing the end of a lock. */
3950 nflp->nfl_end = newnflp->nfl_start - 1;
3951 } else if (newnflp->nfl_end < nflp->nfl_end) {
3952 /* We're replacing the start of a lock. */
3953 nflp->nfl_start = newnflp->nfl_end + 1;
3954 }
3955 if (nflp->nfl_blockcnt) {
3956 /* wake up anyone blocked on this lock */
3957 wakeup(nflp);
3958 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
3959 /* remove nflp from lock list and destroy */
3960 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3961 nfs_file_lock_destroy(nflp);
3962 }
3963 }
3964
3965 nfs_lock_owner_insert_held_lock(nlop, newnflp);
3966
3967 /*
3968 * POSIX locks should be coalesced when possible.
3969 */
3970 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
3971 /*
3972 * Walk through the lock queue and check each of our held locks with
3973 * the previous and next locks in the lock owner's "held lock list".
3974 * If the two locks can be coalesced, we merge the current lock into
3975 * the other (previous or next) lock. Merging this way makes sure that
3976 * lock ranges are always merged forward in the lock queue. This is
3977 * important because anyone blocked on the lock being "merged away"
3978 * will still need to block on that range and it will simply continue
3979 * checking locks that are further down the list.
3980 */
3981 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3982 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3983 continue;
3984 if (nflp->nfl_owner != nlop)
3985 continue;
3986 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
3987 continue;
3988 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
3989 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3990 (coalnflp->nfl_type == nflp->nfl_type) &&
3991 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
3992 coalnflp->nfl_end = nflp->nfl_end;
3993 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3994 lck_mtx_lock(&nlop->nlo_lock);
3995 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3996 lck_mtx_unlock(&nlop->nlo_lock);
3997 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
3998 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3999 (coalnflp->nfl_type == nflp->nfl_type) &&
4000 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4001 coalnflp->nfl_start = nflp->nfl_start;
4002 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4003 lck_mtx_lock(&nlop->nlo_lock);
4004 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4005 lck_mtx_unlock(&nlop->nlo_lock);
4006 }
4007 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
4008 continue;
4009 if (nflp->nfl_blockcnt) {
4010 /* wake up anyone blocked on this lock */
4011 wakeup(nflp);
4012 } else {
4013 /* remove nflp from lock list and destroy */
4014 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4015 nfs_file_lock_destroy(nflp);
4016 }
4017 }
4018 }
4019
4020 lck_mtx_unlock(&np->n_openlock);
4021 nfs_open_state_clear_busy(np);
4022 nfs_mount_state_in_use_end(nmp, error);
4023
4024 if (nflp2)
4025 nfs_file_lock_destroy(nflp2);
4026 return (error);
4027 }
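
/*
 * The update pass above clips, splits, and (for POSIX locks) coalesces
 * the owner's existing locks against the newly granted range.  The
 * arithmetic on inclusive ranges is the fiddly part; a simplified
 * user-space model of the three cases, with hypothetical types and no
 * list or waiter handling:
 */
#if 0
#include <stdint.h>

struct rng { uint64_t start, end; };	/* inclusive byte range */

/* new range covers the old one entirely: the old lock dies */
static int
rng_covers(struct rng n, struct rng o)
{
	return ((n.start <= o.start) && (n.end >= o.end));
}

/* new range lands strictly inside the old one: the old lock splits */
static void
rng_split(struct rng n, struct rng o, struct rng *lo, struct rng *hi)
{
	lo->start = o.start;	lo->end = n.start - 1;	/* piece below */
	hi->start = n.end + 1;	hi->end = o.end;	/* piece above */
}

/* adjacent same-type ranges coalesce: [a,b] + [b+1,c] -> [a,c] */
static int
rng_adjacent(struct rng a, struct rng b)
{
	return (a.end + 1 == b.start);
}
#endif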
4028
4029 /*
4030 * Release all (same style) locks within the given range.
4031 */
4032 int
4033 nfs_advlock_unlock(
4034 nfsnode_t np,
4035 struct nfs_open_file *nofp,
4036 struct nfs_lock_owner *nlop,
4037 uint64_t start,
4038 uint64_t end,
4039 int style,
4040 vfs_context_t ctx)
4041 {
4042 struct nfsmount *nmp;
4043 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4044 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4045
4046 nmp = NFSTONMP(np);
4047 if (nfs_mount_gone(nmp))
4048 return (ENXIO);
4049
4050 restart:
4051 if ((error = nfs_mount_state_in_use_start(nmp, NULL)))
4052 return (error);
4053 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4054 nfs_mount_state_in_use_end(nmp, 0);
4055 error = nfs4_reopen(nofp, NULL);
4056 if (error)
4057 return (error);
4058 goto restart;
4059 }
4060 if ((error = nfs_open_state_set_busy(np, NULL))) {
4061 nfs_mount_state_in_use_end(nmp, error);
4062 return (error);
4063 }
4064
4065 lck_mtx_lock(&np->n_openlock);
4066 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4067 /*
4068 * We may need to allocate a new lock if an existing lock gets split.
4069 * So, we first scan the list to check for a split, and if there's
4070 * going to be one, we'll allocate one now.
4071 */
4072 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4073 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4074 continue;
4075 if (nflp->nfl_owner != nlop)
4076 continue;
4077 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4078 continue;
4079 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4080 continue;
4081 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4082 willsplit = 1;
4083 break;
4084 }
4085 }
4086 if (willsplit) {
4087 lck_mtx_unlock(&np->n_openlock);
4088 nfs_open_state_clear_busy(np);
4089 nfs_mount_state_in_use_end(nmp, 0);
4090 newnflp = nfs_file_lock_alloc(nlop);
4091 if (!newnflp)
4092 return (ENOMEM);
4093 goto restart;
4094 }
4095 }
4096
4097 /*
4098 * Free all of our locks in the given range.
4099 *
4100 * Note that this process requires sending requests to the server.
4101 * Because of this, we will release the n_openlock while performing
4102 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4103 * locks from changing underneath us. However, other entries in the
4104 * list may be removed. So we need to be careful walking the list.
4105 */
4106
4107 /*
4108 * Don't unlock ranges that are held by other-style locks.
4109 * If the style is POSIX, don't send any unlock RPCs while an flock-style lock is held.
4110 * If we unlock an flock, don't send unlock RPCs for any POSIX-style
4111 * ranges held; instead, send unlocks for the ranges not held.
4112 */
4113 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4114 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4115 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
4116 send_unlock_rpcs = 0;
4117 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4118 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4119 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4120 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4121 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4122 uint64_t s = 0;
4123 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4124 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4125 while (!delegated && nflp) {
4126 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4127 /* unlock the range preceding this lock */
4128 lck_mtx_unlock(&np->n_openlock);
4129 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0,
4130 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4131 if (nfs_mount_state_error_should_restart(error)) {
4132 nfs_open_state_clear_busy(np);
4133 nfs_mount_state_in_use_end(nmp, error);
4134 goto restart;
4135 }
4136 lck_mtx_lock(&np->n_openlock);
4137 if (error)
4138 goto out;
4139 s = nflp->nfl_end+1;
4140 }
4141 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4142 }
4143 if (!delegated) {
4144 lck_mtx_unlock(&np->n_openlock);
4145 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4146 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4147 if (nfs_mount_state_error_should_restart(error)) {
4148 nfs_open_state_clear_busy(np);
4149 nfs_mount_state_in_use_end(nmp, error);
4150 goto restart;
4151 }
4152 lck_mtx_lock(&np->n_openlock);
4153 if (error)
4154 goto out;
4155 }
4156 send_unlock_rpcs = 0;
4157 }
4158
4159 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4160 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4161 continue;
4162 if (nflp->nfl_owner != nlop)
4163 continue;
4164 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4165 continue;
4166 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4167 continue;
4168 /* here's one to unlock */
4169 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4170 /* The entire lock is being unlocked. */
4171 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4172 lck_mtx_unlock(&np->n_openlock);
4173 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4174 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4175 if (nfs_mount_state_error_should_restart(error)) {
4176 nfs_open_state_clear_busy(np);
4177 nfs_mount_state_in_use_end(nmp, error);
4178 goto restart;
4179 }
4180 lck_mtx_lock(&np->n_openlock);
4181 }
4182 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4183 if (error)
4184 break;
4185 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4186 lck_mtx_lock(&nlop->nlo_lock);
4187 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4188 lck_mtx_unlock(&nlop->nlo_lock);
4189 /* lock will be destroyed below, if no waiters */
4190 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4191 /* We're unlocking a range in the middle of a lock. */
4192 /* The current lock will be split into two locks. */
4193 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4194 lck_mtx_unlock(&np->n_openlock);
4195 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4196 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4197 if (nfs_mount_state_error_should_restart(error)) {
4198 nfs_open_state_clear_busy(np);
4199 nfs_mount_state_in_use_end(nmp, error);
4200 goto restart;
4201 }
4202 lck_mtx_lock(&np->n_openlock);
4203 }
4204 if (error)
4205 break;
4206 /* update locks and insert new lock after current lock */
4207 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
4208 newnflp->nfl_type = nflp->nfl_type;
4209 newnflp->nfl_start = end + 1;
4210 newnflp->nfl_end = nflp->nfl_end;
4211 nflp->nfl_end = start - 1;
4212 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4213 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4214 nextnflp = newnflp;
4215 newnflp = NULL;
4216 } else if (start > nflp->nfl_start) {
4217 /* We're unlocking the end of a lock. */
4218 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4219 lck_mtx_unlock(&np->n_openlock);
4220 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4221 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4222 if (nfs_mount_state_error_should_restart(error)) {
4223 nfs_open_state_clear_busy(np);
4224 nfs_mount_state_in_use_end(nmp, error);
4225 goto restart;
4226 }
4227 lck_mtx_lock(&np->n_openlock);
4228 }
4229 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4230 if (error)
4231 break;
4232 nflp->nfl_end = start - 1;
4233 } else if (end < nflp->nfl_end) {
4234 /* We're unlocking the start of a lock. */
4235 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4236 lck_mtx_unlock(&np->n_openlock);
4237 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4238 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4239 if (nfs_mount_state_error_should_restart(error)) {
4240 nfs_open_state_clear_busy(np);
4241 nfs_mount_state_in_use_end(nmp, error);
4242 goto restart;
4243 }
4244 lck_mtx_lock(&np->n_openlock);
4245 }
4246 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4247 if (error)
4248 break;
4249 nflp->nfl_start = end + 1;
4250 }
4251 if (nflp->nfl_blockcnt) {
4252 /* wake up anyone blocked on this lock */
4253 wakeup(nflp);
4254 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4255 /* remove nflp from lock list and destroy */
4256 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4257 nfs_file_lock_destroy(nflp);
4258 }
4259 }
4260 out:
4261 lck_mtx_unlock(&np->n_openlock);
4262 nfs_open_state_clear_busy(np);
4263 nfs_mount_state_in_use_end(nmp, 0);
4264
4265 if (newnflp)
4266 nfs_file_lock_destroy(newnflp);
4267 return (error);
4268 }
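
/*
 * From user space, the "unlocking a range in the middle" case above is
 * what makes POSIX locks split: locking [0,99] and then unlocking
 * [40,59] leaves two held locks, which is why this function may need
 * to allocate a new nfs_file_lock.  Runnable sketch with a
 * hypothetical path (error handling omitted):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
setlk(int fd, short type, off_t start, off_t len)
{
	struct flock fl = {
		.l_type = type, .l_whence = SEEK_SET,
		.l_start = start, .l_len = len,
	};
	return (fcntl(fd, F_SETLK, &fl));
}

int
main(void)
{
	int fd = open("/mnt/nfs/testfile", O_RDWR);	/* hypothetical path */
	if (fd < 0)
		return (1);
	setlk(fd, F_WRLCK, 0, 100);	/* lock bytes [0,99] */
	setlk(fd, F_UNLCK, 40, 20);	/* unlock [40,59]: the lock splits */
	/* [0,39] and [60,99] remain held as two separate locks */
	close(fd);
	return (0);
}
#endif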
4269
4270 /*
4271 * NFSv4 advisory file locking
4272 */
4273 int
4274 nfs_vnop_advlock(
4275 struct vnop_advlock_args /* {
4276 struct vnodeop_desc *a_desc;
4277 vnode_t a_vp;
4278 caddr_t a_id;
4279 int a_op;
4280 struct flock *a_fl;
4281 int a_flags;
4282 vfs_context_t a_context;
4283 } */ *ap)
4284 {
4285 vnode_t vp = ap->a_vp;
4286 nfsnode_t np = VTONFS(ap->a_vp);
4287 struct flock *fl = ap->a_fl;
4288 int op = ap->a_op;
4289 int flags = ap->a_flags;
4290 vfs_context_t ctx = ap->a_context;
4291 struct nfsmount *nmp;
4292 struct nfs_open_owner *noop = NULL;
4293 struct nfs_open_file *nofp = NULL;
4294 struct nfs_lock_owner *nlop = NULL;
4295 off_t lstart;
4296 uint64_t start, end;
4297 int error = 0, modified, style;
4298 enum vtype vtype;
4299 #define OFF_MAX QUAD_MAX
4300
4301 nmp = VTONMP(ap->a_vp);
4302 if (nfs_mount_gone(nmp))
4303 return (ENXIO);
4304 lck_mtx_lock(&nmp->nm_lock);
4305 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4306 lck_mtx_unlock(&nmp->nm_lock);
4307 return (ENOTSUP);
4308 }
4309 lck_mtx_unlock(&nmp->nm_lock);
4310
4311 if (np->n_flag & NREVOKE)
4312 return (EIO);
4313 vtype = vnode_vtype(ap->a_vp);
4314 if (vtype == VDIR) /* ignore lock requests on directories */
4315 return (0);
4316 if (vtype != VREG) /* anything other than regular files is invalid */
4317 return (EINVAL);
4318
4319 /* Convert the flock structure into a start and end. */
4320 switch (fl->l_whence) {
4321 case SEEK_SET:
4322 case SEEK_CUR:
4323 /*
4324 * Caller is responsible for adding any necessary offset
4325 * to fl->l_start when SEEK_CUR is used.
4326 */
4327 lstart = fl->l_start;
4328 break;
4329 case SEEK_END:
4330 /* need to flush and refetch attributes to make */
4331 /* sure we have the correct end-of-file offset */
4332 if ((error = nfs_node_lock(np)))
4333 return (error);
4334 modified = (np->n_flag & NMODIFIED);
4335 nfs_node_unlock(np);
4336 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
4337 return (error);
4338 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED)))
4339 return (error);
4340 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4341 if ((np->n_size > OFF_MAX) ||
4342 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
4343 error = EOVERFLOW;
4344 lstart = np->n_size + fl->l_start;
4345 nfs_data_unlock(np);
4346 if (error)
4347 return (error);
4348 break;
4349 default:
4350 return (EINVAL);
4351 }
4352 if (lstart < 0)
4353 return (EINVAL);
4354 start = lstart;
4355 if (fl->l_len == 0) {
4356 end = UINT64_MAX;
4357 } else if (fl->l_len > 0) {
4358 if ((fl->l_len - 1) > (OFF_MAX - lstart))
4359 return (EOVERFLOW);
4360 end = start - 1 + fl->l_len;
4361 } else { /* l_len is negative */
4362 if ((lstart + fl->l_len) < 0)
4363 return (EINVAL);
4364 end = start - 1;
4365 start += fl->l_len;
4366 }
4367 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX))))
4368 return (EINVAL);
4369
4370 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4371 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
4372 return (EINVAL);
4373
4374 /* find the lock owner, allocating one unless this is an unlock */
4375 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4376 if (!nlop) {
4377 error = (op == F_UNLCK) ? 0 : ENOMEM;
4378 if (error)
4379 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4380 goto out;
4381 }
4382
4383 if (op == F_GETLK) {
4384 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4385 } else {
4386 /* find the open owner */
4387 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4388 if (!noop) {
4389 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4390 error = EPERM;
4391 goto out;
4392 }
4393 /* find the open file */
4394 restart:
4395 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4396 if (error)
4397 error = EBADF;
4398 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4399 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4400 error = EIO;
4401 }
4402 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4403 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4404 nofp = NULL;
4405 if (!error)
4406 goto restart;
4407 }
4408 if (error) {
4409 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4410 goto out;
4411 }
4412 if (op == F_UNLCK) {
4413 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4414 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4415 if ((op == F_SETLK) && (flags & F_WAIT))
4416 op = F_SETLKW;
4417 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4418 } else {
4419 /* not getlk, unlock or lock? */
4420 error = EINVAL;
4421 }
4422 }
4423
4424 out:
4425 if (nlop)
4426 nfs_lock_owner_rele(nlop);
4427 if (noop)
4428 nfs_open_owner_rele(noop);
4429 return (error);
4430 }
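
/*
 * The flock -> [start, end] conversion above encodes the POSIX l_len
 * rules: zero means "through end of file", a positive length extends
 * forward from l_start, and a negative length extends backward, ending
 * just before l_start.  A compact user-space model of the same mapping
 * (hypothetical helper; the overflow and negative-start checks done by
 * nfs_vnop_advlock are omitted):
 */
#if 0
#include <stdint.h>
#include <sys/types.h>

static void
flock_to_range(off_t l_start, off_t l_len, uint64_t *start, uint64_t *end)
{
	if (l_len == 0) {		/* zero length: lock to EOF */
		*start = l_start;
		*end = UINT64_MAX;
	} else if (l_len > 0) {		/* forward: [l_start, l_start + l_len - 1] */
		*start = l_start;
		*end = l_start + l_len - 1;
	} else {			/* backward: [l_start + l_len, l_start - 1] */
		*start = l_start + l_len;	/* caller ensures this is >= 0 */
		*end = l_start - 1;
	}
}
#endif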
4431
4432 /*
4433 * Check if an open owner holds any locks on a file.
4434 */
4435 int
4436 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4437 {
4438 struct nfs_lock_owner *nlop;
4439
4440 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4441 if (nlop->nlo_open_owner != noop)
4442 continue;
4443 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4444 break;
4445 }
4446 return (nlop ? 1 : 0);
4447 }
4448
4449 /*
4450 * Reopen simple (no deny, no locks) open state that was lost.
4451 */
4452 int
4453 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4454 {
4455 struct nfs_open_owner *noop = nofp->nof_owner;
4456 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4457 nfsnode_t np = nofp->nof_np;
4458 vnode_t vp = NFSTOV(np);
4459 vnode_t dvp = NULL;
4460 struct componentname cn;
4461 const char *vname = NULL;
4462 const char *name = NULL;
4463 size_t namelen;
4464 char smallname[128];
4465 char *filename = NULL;
4466 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4467 struct timespec ts = { 1, 0 };
4468
4469 lck_mtx_lock(&nofp->nof_lock);
4470 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4471 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
4472 break;
4473 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
4474 slpflag = 0;
4475 }
4476 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4477 lck_mtx_unlock(&nofp->nof_lock);
4478 return (error);
4479 }
4480 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4481 lck_mtx_unlock(&nofp->nof_lock);
4482
4483 nfs_node_lock_force(np);
4484 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4485 /*
4486 * The node's been sillyrenamed, so we need to use
4487 * the sillyrename directory/name to do the open.
4488 */
4489 struct nfs_sillyrename *nsp = np->n_sillyrename;
4490 dvp = NFSTOV(nsp->nsr_dnp);
4491 if ((error = vnode_get(dvp))) {
4492 nfs_node_unlock(np);
4493 goto out;
4494 }
4495 name = nsp->nsr_name;
4496 } else {
4497 /*
4498 * [sigh] We can't trust VFS to get the parent right for named
4499 * attribute nodes. (It likes to reparent the nodes after we've
4500 * created them.) Luckily we can probably get the right parent
4501 * from the n_parent we have stashed away.
4502 */
4503 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4504 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4505 dvp = NULL;
4506 if (!dvp)
4507 dvp = vnode_getparent(vp);
4508 vname = vnode_getname(vp);
4509 if (!dvp || !vname) {
4510 if (!error)
4511 error = EIO;
4512 nfs_node_unlock(np);
4513 goto out;
4514 }
4515 name = vname;
4516 }
4517 filename = &smallname[0];
4518 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4519 if (namelen >= sizeof(smallname)) {
4520 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
4521 if (!filename) {
4522 error = ENOMEM;
4523 goto out;
4524 }
4525 snprintf(filename, namelen+1, "%s", name);
4526 }
4527 nfs_node_unlock(np);
4528 bzero(&cn, sizeof(cn));
4529 cn.cn_nameptr = filename;
4530 cn.cn_namelen = namelen;
4531
4532 restart:
4533 done = 0;
4534 if ((error = nfs_mount_state_in_use_start(nmp, thd)))
4535 goto out;
4536
4537 if (nofp->nof_rw)
4538 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4539 if (!error && nofp->nof_w)
4540 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4541 if (!error && nofp->nof_r)
4542 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4543
4544 if (nfs_mount_state_in_use_end(nmp, error)) {
4545 if (error == NFSERR_GRACE)
4546 goto restart;
4547 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4548 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4549 error = 0;
4550 goto out;
4551 }
4552 done = 1;
4553 out:
4554 if (error && (error != EINTR) && (error != ERESTART))
4555 nfs_revoke_open_state_for_node(np);
4556 lck_mtx_lock(&nofp->nof_lock);
4557 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4558 if (done)
4559 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4560 else if (error)
4561 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4562 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4563 lck_mtx_unlock(&nofp->nof_lock);
4564 if (filename && (filename != &smallname[0]))
4565 FREE(filename, M_TEMP);
4566 if (vname)
4567 vnode_putname(vname);
4568 if (dvp != NULLVP)
4569 vnode_put(dvp);
4570 return (error);
4571 }
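
/*
 * Only one thread may reopen a file's lost state at a time; everyone
 * else sleeps on the flags word until NFS_OPEN_FILE_REOPENING clears.
 * A stripped-down user-space analogue of that gate using a condition
 * variable (the kernel uses msleep on &nofp->nof_flags instead, and
 * also checks for signals each time around the loop):
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gate_cv   = PTHREAD_COND_INITIALIZER;
static bool reopening;		/* NFS_OPEN_FILE_REOPENING */
static bool need_reopen;	/* NFS_OPEN_FILE_REOPEN */

static void
reopen_gate(void (*do_reopen)(void))
{
	pthread_mutex_lock(&gate_lock);
	while (reopening)			/* another thread is reopening */
		pthread_cond_wait(&gate_cv, &gate_lock);
	if (need_reopen) {
		reopening = true;
		pthread_mutex_unlock(&gate_lock);
		do_reopen();			/* send the OPEN RPCs */
		pthread_mutex_lock(&gate_lock);
		reopening = false;
		need_reopen = false;
		pthread_cond_broadcast(&gate_cv);
	}
	pthread_mutex_unlock(&gate_lock);
}
#endif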
4572
4573 /*
4574 * Send a normal OPEN RPC to open/create a file.
4575 */
4576 int
4577 nfs4_open_rpc(
4578 struct nfs_open_file *nofp,
4579 vfs_context_t ctx,
4580 struct componentname *cnp,
4581 struct vnode_attr *vap,
4582 vnode_t dvp,
4583 vnode_t *vpp,
4584 int create,
4585 int share_access,
4586 int share_deny)
4587 {
4588 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4589 cnp, vap, dvp, vpp, create, share_access, share_deny));
4590 }
4591
4592 /*
4593 * Send an OPEN RPC to reopen a file.
4594 */
4595 int
4596 nfs4_open_reopen_rpc(
4597 struct nfs_open_file *nofp,
4598 thread_t thd,
4599 kauth_cred_t cred,
4600 struct componentname *cnp,
4601 vnode_t dvp,
4602 vnode_t *vpp,
4603 int share_access,
4604 int share_deny)
4605 {
4606 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny));
4607 }
4608
4609 /*
4610 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4611 */
4612 int
4613 nfs4_open_confirm_rpc(
4614 struct nfsmount *nmp,
4615 nfsnode_t dnp,
4616 u_char *fhp,
4617 int fhlen,
4618 struct nfs_open_owner *noop,
4619 nfs_stateid *sid,
4620 thread_t thd,
4621 kauth_cred_t cred,
4622 struct nfs_vattr *nvap,
4623 uint64_t *xidp)
4624 {
4625 struct nfsm_chain nmreq, nmrep;
4626 int error = 0, status, numops;
4627 struct nfsreq_secinfo_args si;
4628
4629 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
4630 nfsm_chain_null(&nmreq);
4631 nfsm_chain_null(&nmrep);
4632
4633 // PUTFH, OPEN_CONFIRM, GETATTR
4634 numops = 3;
4635 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
4636 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
4637 numops--;
4638 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4639 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
4640 numops--;
4641 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
4642 nfsm_chain_add_stateid(error, &nmreq, sid);
4643 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4644 numops--;
4645 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4646 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4647 nfsm_chain_build_done(error, &nmreq);
4648 nfsm_assert(error, (numops == 0), EPROTO);
4649 nfsmout_if(error);
4650 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
4651
4652 nfsm_chain_skip_tag(error, &nmrep);
4653 nfsm_chain_get_32(error, &nmrep, numops);
4654 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4655 nfsmout_if(error);
4656 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
4657 nfs_owner_seqid_increment(noop, NULL, error);
4658 nfsm_chain_get_stateid(error, &nmrep, sid);
4659 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4660 nfsmout_if(error);
4661 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
4662 nfsmout:
4663 nfsm_chain_cleanup(&nmreq);
4664 nfsm_chain_cleanup(&nmrep);
4665 return (error);
4666 }
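
/*
 * Note the seqid discipline: nfs_owner_seqid_increment() is called with
 * the error from the reply because RFC 7530 exempts a handful of errors
 * from advancing the owner's sequence number (the server won't have
 * bumped its copy either).  A sketch of that rule, presumably matching
 * the helper's behavior, using the RFC's error names:
 */
#if 0
static int
seqid_advances(int nfs_error)
{
	switch (nfs_error) {
	/* errors for which the server does not bump the seqid */
	case NFS4ERR_STALE_CLIENTID:
	case NFS4ERR_STALE_STATEID:
	case NFS4ERR_BAD_STATEID:
	case NFS4ERR_BAD_SEQID:
	case NFS4ERR_BADXDR:
	case NFS4ERR_RESOURCE:
	case NFS4ERR_NOFILEHANDLE:
		return (0);
	default:
		return (1);
	}
}
#endif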
4667
4668 /*
4669 * common OPEN RPC code
4670 *
4671 * If create is set, ctx must be passed in.
4672 * If no node is passed in, returns a newly created node on success.
4673 */
4674 int
4675 nfs4_open_rpc_internal(
4676 struct nfs_open_file *nofp,
4677 vfs_context_t ctx,
4678 thread_t thd,
4679 kauth_cred_t cred,
4680 struct componentname *cnp,
4681 struct vnode_attr *vap,
4682 vnode_t dvp,
4683 vnode_t *vpp,
4684 int create,
4685 int share_access,
4686 int share_deny)
4687 {
4688 struct nfsmount *nmp;
4689 struct nfs_open_owner *noop = nofp->nof_owner;
4690 struct nfs_vattr nvattr;
4691 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
4692 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
4693 u_int64_t xid, savedxid = 0;
4694 nfsnode_t dnp = VTONFS(dvp);
4695 nfsnode_t np, newnp = NULL;
4696 vnode_t newvp = NULL;
4697 struct nfsm_chain nmreq, nmrep;
4698 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
4699 uint32_t rflags, delegation, recall;
4700 struct nfs_stateid stateid, dstateid, *sid;
4701 fhandle_t fh;
4702 struct nfsreq rq, *req = &rq;
4703 struct nfs_dulookup dul;
4704 char sbuf[64], *s;
4705 uint32_t ace_type, ace_flags, ace_mask, len, slen;
4706 struct kauth_ace ace;
4707 struct nfsreq_secinfo_args si;
4708
4709 if (create && !ctx)
4710 return (EINVAL);
4711
4712 nmp = VTONMP(dvp);
4713 if (nfs_mount_gone(nmp))
4714 return (ENXIO);
4715 nfsvers = nmp->nm_vers;
4716 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4717 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
4718 return (EINVAL);
4719
4720 np = *vpp ? VTONFS(*vpp) : NULL;
4721 if (create && vap) {
4722 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
4723 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4724 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4725 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4726 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)))
4727 vap->va_vaflags |= VA_UTIMES_NULL;
4728 } else {
4729 exclusive = gotuid = gotgid = 0;
4730 }
4731 if (nofp) {
4732 sid = &nofp->nof_stateid;
4733 } else {
4734 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
4735 sid = &stateid;
4736 }
4737
4738 if ((error = nfs_open_owner_set_busy(noop, thd)))
4739 return (error);
4740 again:
4741 rflags = delegation = recall = 0;
4742 ace.ace_flags = 0;
4743 s = sbuf;
4744 slen = sizeof(sbuf);
4745 NVATTR_INIT(&nvattr);
4746 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
4747
4748 nfsm_chain_null(&nmreq);
4749 nfsm_chain_null(&nmrep);
4750
4751 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4752 numops = 6;
4753 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
4754 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
4755 numops--;
4756 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4757 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4758 numops--;
4759 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
4760 numops--;
4761 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
4762 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4763 nfsm_chain_add_32(error, &nmreq, share_access);
4764 nfsm_chain_add_32(error, &nmreq, share_deny);
4765 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
4766 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
4767 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
4768 nfsm_chain_add_32(error, &nmreq, create);
4769 if (create) {
4770 if (exclusive) {
4771 static uint32_t create_verf; // XXX need a better verifier
4772 create_verf++;
4773 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4774 /* insert 64 bit verifier */
4775 nfsm_chain_add_32(error, &nmreq, create_verf);
4776 nfsm_chain_add_32(error, &nmreq, create_verf);
4777 } else {
4778 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4779 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
4780 }
4781 }
4782 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
4783 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4784 numops--;
4785 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4786 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
4787 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
4788 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
4789 numops--;
4790 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
4791 numops--;
4792 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4793 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4794 nfsm_chain_build_done(error, &nmreq);
4795 nfsm_assert(error, (numops == 0), EPROTO);
4796 if (!error)
4797 error = busyerror = nfs_node_set_busy(dnp, thd);
4798 nfsmout_if(error);
4799
4800 if (create && !namedattrs)
4801 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4802
4803 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
4804 if (!error) {
4805 if (create && !namedattrs)
4806 nfs_dulookup_start(&dul, dnp, ctx);
4807 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4808 savedxid = xid;
4809 }
4810
4811 if (create && !namedattrs)
4812 nfs_dulookup_finish(&dul, dnp, ctx);
4813
4814 if ((lockerror = nfs_node_lock(dnp)))
4815 error = lockerror;
4816 nfsm_chain_skip_tag(error, &nmrep);
4817 nfsm_chain_get_32(error, &nmrep, numops);
4818 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4819 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
4820 nfsmout_if(error);
4821 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
4822 nfs_owner_seqid_increment(noop, NULL, error);
4823 nfsm_chain_get_stateid(error, &nmrep, sid);
4824 nfsm_chain_check_change_info(error, &nmrep, dnp);
4825 nfsm_chain_get_32(error, &nmrep, rflags);
4826 bmlen = NFS_ATTR_BITMAP_LEN;
4827 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
4828 nfsm_chain_get_32(error, &nmrep, delegation);
4829 if (!error)
4830 switch (delegation) {
4831 case NFS_OPEN_DELEGATE_NONE:
4832 break;
4833 case NFS_OPEN_DELEGATE_READ:
4834 case NFS_OPEN_DELEGATE_WRITE:
4835 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
4836 nfsm_chain_get_32(error, &nmrep, recall);
4837 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
4838 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
4839 /* if we have any trouble accepting the ACE, just invalidate it */
4840 ace_type = ace_flags = ace_mask = len = 0;
4841 nfsm_chain_get_32(error, &nmrep, ace_type);
4842 nfsm_chain_get_32(error, &nmrep, ace_flags);
4843 nfsm_chain_get_32(error, &nmrep, ace_mask);
4844 nfsm_chain_get_32(error, &nmrep, len);
4845 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
4846 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
4847 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
4848 if (!error && (len >= slen)) {
4849 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
4850 if (s)
4851 slen = len+1;
4852 else
4853 ace.ace_flags = 0;
4854 }
4855 if (s)
4856 nfsm_chain_get_opaque(error, &nmrep, len, s);
4857 else
4858 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
4859 if (!error && s) {
4860 s[len] = '\0';
4861 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
4862 ace.ace_flags = 0;
4863 }
4864 if (error || !s)
4865 ace.ace_flags = 0;
4866 if (s && (s != sbuf))
4867 FREE(s, M_TEMP);
4868 break;
4869 default:
4870 error = EBADRPC;
4871 break;
4872 }
4873 /* At this point if we have no error, the object was created/opened. */
4874 open_error = error;
4875 nfsmout_if(error);
4876 if (create && vap && !exclusive)
4877 nfs_vattr_set_supported(bitmap, vap);
4878 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4879 nfsmout_if(error);
4880 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
4881 nfsmout_if(error);
4882 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
4883 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
4884 error = EBADRPC;
4885 goto nfsmout;
4886 }
4887 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4888 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4889 // Solaris Named Attributes may do this due to a bug... so don't warn for named attributes.
4890 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
4891 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
4892 }
4893 /* directory attributes: if we don't get them, make sure to invalidate */
4894 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
4895 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4896 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
4897 if (error)
4898 NATTRINVALIDATE(dnp);
4899 nfsmout_if(error);
4900
4901 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
4902 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
4903
4904 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
4905 nfs_node_unlock(dnp);
4906 lockerror = ENOENT;
4907 NVATTR_CLEANUP(&nvattr);
4908 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
4909 nfsmout_if(error);
4910 savedxid = xid;
4911 if ((lockerror = nfs_node_lock(dnp)))
4912 error = lockerror;
4913 }
4914
4915 nfsmout:
4916 nfsm_chain_cleanup(&nmreq);
4917 nfsm_chain_cleanup(&nmrep);
4918
4919 if (!lockerror && create) {
4920 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
4921 dnp->n_flag &= ~NNEGNCENTRIES;
4922 cache_purge_negatives(dvp);
4923 }
4924 dnp->n_flag |= NMODIFIED;
4925 nfs_node_unlock(dnp);
4926 lockerror = ENOENT;
4927 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4928 }
4929 if (!lockerror)
4930 nfs_node_unlock(dnp);
4931 if (!error && !np && fh.fh_len) {
4932 /* create the vnode with the filehandle and attributes */
4933 xid = savedxid;
4934 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
4935 if (!error)
4936 newvp = NFSTOV(newnp);
4937 }
4938 NVATTR_CLEANUP(&nvattr);
4939 if (!busyerror)
4940 nfs_node_clear_busy(dnp);
4941 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
4942 if (!np)
4943 np = newnp;
4944 if (!error && np && !recall) {
4945 /* stuff the delegation state in the node */
4946 lck_mtx_lock(&np->n_openlock);
4947 np->n_openflags &= ~N_DELEG_MASK;
4948 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4949 np->n_dstateid = dstateid;
4950 np->n_dace = ace;
4951 if (np->n_dlink.tqe_next == NFSNOLIST) {
4952 lck_mtx_lock(&nmp->nm_lock);
4953 if (np->n_dlink.tqe_next == NFSNOLIST)
4954 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4955 lck_mtx_unlock(&nmp->nm_lock);
4956 }
4957 lck_mtx_unlock(&np->n_openlock);
4958 } else {
4959 /* give the delegation back */
4960 if (np) {
4961 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4962 /* update delegation state and return it */
4963 lck_mtx_lock(&np->n_openlock);
4964 np->n_openflags &= ~N_DELEG_MASK;
4965 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4966 np->n_dstateid = dstateid;
4967 np->n_dace = ace;
4968 if (np->n_dlink.tqe_next == NFSNOLIST) {
4969 lck_mtx_lock(&nmp->nm_lock);
4970 if (np->n_dlink.tqe_next == NFSNOLIST)
4971 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4972 lck_mtx_unlock(&nmp->nm_lock);
4973 }
4974 lck_mtx_unlock(&np->n_openlock);
4975 /* don't need to send a separate delegreturn for fh */
4976 fh.fh_len = 0;
4977 }
4978 /* return np's current delegation */
4979 nfs4_delegation_return(np, 0, thd, cred);
4980 }
4981 if (fh.fh_len) /* return fh's delegation if it wasn't for np */
4982 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
4983 }
4984 }
4985 if (error) {
4986 if (exclusive && (error == NFSERR_NOTSUPP)) {
4987 exclusive = 0;
4988 goto again;
4989 }
4990 if (newvp) {
4991 nfs_node_unlock(newnp);
4992 vnode_put(newvp);
4993 }
4994 } else if (create) {
4995 nfs_node_unlock(newnp);
4996 if (exclusive) {
4997 error = nfs4_setattr_rpc(newnp, vap, ctx);
4998 if (error && (gotuid || gotgid)) {
4999 /* it's possible the server didn't like our attempt to set IDs, */
5000 /* so let's try the setattr again without them */
5001 VATTR_CLEAR_ACTIVE(vap, va_uid);
5002 VATTR_CLEAR_ACTIVE(vap, va_gid);
5003 error = nfs4_setattr_rpc(newnp, vap, ctx);
5004 }
5005 }
5006 if (error)
5007 vnode_put(newvp);
5008 else
5009 *vpp = newvp;
5010 }
5011 nfs_open_owner_clear_busy(noop);
5012 return (error);
5013 }
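/*
 * Note on the delegation bookkeeping above: n_dlink is hooked onto the
 * mount's nm_delegations list only while a delegation is held, and
 * NFSNOLIST marks an unlinked entry.  The unlocked NFSNOLIST test under
 * n_openlock is just a cheap first pass; it is repeated under nm_lock so
 * the node can't be queued twice.  The shape of the pattern:
 *
 *	if (np->n_dlink.tqe_next == NFSNOLIST) {	// unlocked hint
 *		lck_mtx_lock(&nmp->nm_lock);
 *		if (np->n_dlink.tqe_next == NFSNOLIST)	// recheck under lock
 *			TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
 *		lck_mtx_unlock(&nmp->nm_lock);
 *	}
 */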
5014
5015
5016 /*
5017 * Send an OPEN RPC to claim a delegated open for a file
5018 */
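/*
 * Roughly: the compound is PUTFH(parent dir), OPEN with claim type
 * NFS_CLAIM_DELEGATE_CUR (carrying the current delegation stateid and
 * the file's name), and a GETATTR that also requests the filehandle.
 * CLAIM_DELEGATE_CUR converts an open we performed locally under a
 * delegation into real open state on the server, typically because the
 * delegation is being recalled or returned.
 */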
5019 int
5020 nfs4_claim_delegated_open_rpc(
5021 struct nfs_open_file *nofp,
5022 int share_access,
5023 int share_deny,
5024 int flags)
5025 {
5026 struct nfsmount *nmp;
5027 struct nfs_open_owner *noop = nofp->nof_owner;
5028 struct nfs_vattr nvattr;
5029 int error = 0, lockerror = ENOENT, status;
5030 int nfsvers, numops;
5031 u_int64_t xid;
5032 nfsnode_t np = nofp->nof_np;
5033 struct nfsm_chain nmreq, nmrep;
5034 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5035 uint32_t rflags = 0, delegation, recall = 0;
5036 fhandle_t fh;
5037 struct nfs_stateid dstateid;
5038 char sbuf[64], *s = sbuf;
5039 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5040 struct kauth_ace ace;
5041 vnode_t dvp = NULL;
5042 const char *vname = NULL;
5043 const char *name = NULL;
5044 size_t namelen;
5045 char smallname[128];
5046 char *filename = NULL;
5047 struct nfsreq_secinfo_args si;
5048
5049 nmp = NFSTONMP(np);
5050 if (nfs_mount_gone(nmp))
5051 return (ENXIO);
5052 nfsvers = nmp->nm_vers;
5053
5054 nfs_node_lock_force(np);
5055 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5056 /*
5057 * The node's been sillyrenamed, so we need to use
5058 * the sillyrename directory/name to do the open.
5059 */
5060 struct nfs_sillyrename *nsp = np->n_sillyrename;
5061 dvp = NFSTOV(nsp->nsr_dnp);
5062 if ((error = vnode_get(dvp))) {
5063 nfs_node_unlock(np);
5064 goto out;
5065 }
5066 name = nsp->nsr_name;
5067 } else {
5068 /*
5069 * [sigh] We can't trust VFS to get the parent right for named
5070 * attribute nodes. (It likes to reparent the nodes after we've
5071 * created them.) Luckily we can probably get the right parent
5072 * from the n_parent we have stashed away.
5073 */
5074 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5075 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
5076 dvp = NULL;
5077 if (!dvp)
5078 dvp = vnode_getparent(NFSTOV(np));
5079 vname = vnode_getname(NFSTOV(np));
5080 if (!dvp || !vname) {
5081 if (!error)
5082 error = EIO;
5083 nfs_node_unlock(np);
5084 goto out;
5085 }
5086 name = vname;
5087 }
5088 filename = &smallname[0];
5089 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5090 if (namelen >= sizeof(smallname)) {
5091 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
5092 if (!filename) {
5093 error = ENOMEM;
5094 nfs_node_unlock(np);
5095 goto out;
5096 }
5097 snprintf(filename, namelen+1, "%s", name);
5098 }
5099 nfs_node_unlock(np);
5100
5101 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5102 goto out;
5103 NVATTR_INIT(&nvattr);
5104 delegation = NFS_OPEN_DELEGATE_NONE;
5105 dstateid = np->n_dstateid;
5106 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5107
5108 nfsm_chain_null(&nmreq);
5109 nfsm_chain_null(&nmrep);
5110
5111 // PUTFH, OPEN, GETATTR(FH)
5112 numops = 3;
5113 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5114 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5115 numops--;
5116 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5117 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5118 numops--;
5119 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5120 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5121 nfsm_chain_add_32(error, &nmreq, share_access);
5122 nfsm_chain_add_32(error, &nmreq, share_deny);
5123 // open owner: clientid + uid
5124 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5125 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5126 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5127 // openflag4
5128 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5129 // open_claim4
5130 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5131 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5132 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5133 numops--;
5134 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5135 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5136 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5137 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5138 nfsm_chain_build_done(error, &nmreq);
5139 nfsm_assert(error, (numops == 0), EPROTO);
5140 nfsmout_if(error);
5141
5142 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5143 noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5144
5145 if ((lockerror = nfs_node_lock(np)))
5146 error = lockerror;
5147 nfsm_chain_skip_tag(error, &nmrep);
5148 nfsm_chain_get_32(error, &nmrep, numops);
5149 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5150 nfsmout_if(error);
5151 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5152 nfs_owner_seqid_increment(noop, NULL, error);
5153 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5154 nfsm_chain_check_change_info(error, &nmrep, np);
5155 nfsm_chain_get_32(error, &nmrep, rflags);
5156 bmlen = NFS_ATTR_BITMAP_LEN;
5157 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5158 nfsm_chain_get_32(error, &nmrep, delegation);
5159 if (!error)
5160 switch (delegation) {
5161 case NFS_OPEN_DELEGATE_NONE:
5162 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5163 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5164 break;
5165 case NFS_OPEN_DELEGATE_READ:
5166 case NFS_OPEN_DELEGATE_WRITE:
5167 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5168 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5169 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5170 (delegation == NFS_OPEN_DELEGATE_READ)))
5171 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5172 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5173 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5174 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5175 nfsm_chain_get_32(error, &nmrep, recall);
5176 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (limitby + 2 words)
5177 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5178 /* if we have any trouble accepting the ACE, just invalidate it */
5179 ace_type = ace_flags = ace_mask = len = 0;
5180 nfsm_chain_get_32(error, &nmrep, ace_type);
5181 nfsm_chain_get_32(error, &nmrep, ace_flags);
5182 nfsm_chain_get_32(error, &nmrep, ace_mask);
5183 nfsm_chain_get_32(error, &nmrep, len);
5184 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5185 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5186 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5187 if (!error && (len >= slen)) {
5188 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5189 if (s)
5190 slen = len+1;
5191 else
5192 ace.ace_flags = 0;
5193 }
5194 if (s)
5195 nfsm_chain_get_opaque(error, &nmrep, len, s);
5196 else
5197 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5198 if (!error && s) {
5199 s[len] = '\0';
5200 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5201 ace.ace_flags = 0;
5202 }
5203 if (error || !s)
5204 ace.ace_flags = 0;
5205 if (s && (s != sbuf))
5206 FREE(s, M_TEMP);
5207 if (!error) {
5208 /* stuff the latest delegation state in the node */
5209 lck_mtx_lock(&np->n_openlock);
5210 np->n_openflags &= ~N_DELEG_MASK;
5211 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5212 np->n_dstateid = dstateid;
5213 np->n_dace = ace;
5214 if (np->n_dlink.tqe_next == NFSNOLIST) {
5215 lck_mtx_lock(&nmp->nm_lock);
5216 if (np->n_dlink.tqe_next == NFSNOLIST)
5217 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5218 lck_mtx_unlock(&nmp->nm_lock);
5219 }
5220 lck_mtx_unlock(&np->n_openlock);
5221 }
5222 break;
5223 default:
5224 error = EBADRPC;
5225 break;
5226 }
5227 nfsmout_if(error);
5228 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5229 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5230 nfsmout_if(error);
5231 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5232 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5233 error = EBADRPC;
5234 goto nfsmout;
5235 }
5236 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5237 // XXX what if fh doesn't match the vnode we think we're re-opening?
5238 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5239 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5240 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5241 }
5242 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5243 nfsmout_if(error);
5244 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5245 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5246 nfsmout:
5247 NVATTR_CLEANUP(&nvattr);
5248 nfsm_chain_cleanup(&nmreq);
5249 nfsm_chain_cleanup(&nmrep);
5250 if (!lockerror)
5251 nfs_node_unlock(np);
5252 nfs_open_owner_clear_busy(noop);
5253 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5254 if (recall) {
5255 /*
5256 * We're making a delegated claim.
5257 * Don't return the delegation here in case we have more to claim.
5258 * Just make sure it's queued up to be returned.
5259 */
5260 nfs4_delegation_return_enqueue(np);
5261 }
5262 }
5263 out:
5264 // if (!error)
5265 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5266 if (filename && (filename != &smallname[0]))
5267 FREE(filename, M_TEMP);
5268 if (vname)
5269 vnode_putname(vname);
5270 if (dvp != NULLVP)
5271 vnode_put(dvp);
5272 return (error);
5273 }
5274
5275 /*
5276 * Send an OPEN RPC to reclaim an open file.
5277 */
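/*
 * This is used during state recovery (note R_RECOVER on the request):
 * the OPEN is sent with claim type NFS_CLAIM_PREVIOUS, which asks the
 * server -- within its grace period after a restart -- to re-establish
 * the open state we held before, including any delegation type we
 * previously had on the file.
 */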
5278 int
5279 nfs4_open_reclaim_rpc(
5280 struct nfs_open_file *nofp,
5281 int share_access,
5282 int share_deny)
5283 {
5284 struct nfsmount *nmp;
5285 struct nfs_open_owner *noop = nofp->nof_owner;
5286 struct nfs_vattr nvattr;
5287 int error = 0, lockerror = ENOENT, status;
5288 int nfsvers, numops;
5289 u_int64_t xid;
5290 nfsnode_t np = nofp->nof_np;
5291 struct nfsm_chain nmreq, nmrep;
5292 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5293 uint32_t rflags = 0, delegation, recall = 0;
5294 fhandle_t fh;
5295 struct nfs_stateid dstateid;
5296 char sbuf[64], *s = sbuf;
5297 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5298 struct kauth_ace ace;
5299 struct nfsreq_secinfo_args si;
5300
5301 nmp = NFSTONMP(np);
5302 if (nfs_mount_gone(nmp))
5303 return (ENXIO);
5304 nfsvers = nmp->nm_vers;
5305
5306 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5307 return (error);
5308
5309 NVATTR_INIT(&nvattr);
5310 delegation = NFS_OPEN_DELEGATE_NONE;
5311 dstateid = np->n_dstateid;
5312 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5313
5314 nfsm_chain_null(&nmreq);
5315 nfsm_chain_null(&nmrep);
5316
5317 // PUTFH, OPEN, GETATTR(FH)
5318 numops = 3;
5319 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5320 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5321 numops--;
5322 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5323 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5324 numops--;
5325 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5326 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5327 nfsm_chain_add_32(error, &nmreq, share_access);
5328 nfsm_chain_add_32(error, &nmreq, share_deny);
5329 // open owner: clientid + uid
5330 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5331 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5332 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5333 // openflag4
5334 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5335 // open_claim4
5336 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5337 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5338 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5339 NFS_OPEN_DELEGATE_NONE;
5340 nfsm_chain_add_32(error, &nmreq, delegation);
5341 delegation = NFS_OPEN_DELEGATE_NONE;
5342 numops--;
5343 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5344 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5345 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5346 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5347 nfsm_chain_build_done(error, &nmreq);
5348 nfsm_assert(error, (numops == 0), EPROTO);
5349 nfsmout_if(error);
5350
5351 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5352 noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status);
5353
5354 if ((lockerror = nfs_node_lock(np)))
5355 error = lockerror;
5356 nfsm_chain_skip_tag(error, &nmrep);
5357 nfsm_chain_get_32(error, &nmrep, numops);
5358 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5359 nfsmout_if(error);
5360 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5361 nfs_owner_seqid_increment(noop, NULL, error);
5362 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5363 nfsm_chain_check_change_info(error, &nmrep, np);
5364 nfsm_chain_get_32(error, &nmrep, rflags);
5365 bmlen = NFS_ATTR_BITMAP_LEN;
5366 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5367 nfsm_chain_get_32(error, &nmrep, delegation);
5368 if (!error)
5369 switch (delegation) {
5370 case NFS_OPEN_DELEGATE_NONE:
5371 if (np->n_openflags & N_DELEG_MASK) {
5372 /*
5373 * Hey! We were supposed to get our delegation back even
5374 * if it was getting immediately recalled. Bad server!
5375 *
5376 * Just try to return the existing delegation.
5377 */
5378 // NP(np, "nfs: open reclaim didn't return delegation?");
5379 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5380 recall = 1;
5381 }
5382 break;
5383 case NFS_OPEN_DELEGATE_READ:
5384 case NFS_OPEN_DELEGATE_WRITE:
5385 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5386 nfsm_chain_get_32(error, &nmrep, recall);
5387 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (limitby + 2 words)
5388 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5389 /* if we have any trouble accepting the ACE, just invalidate it */
5390 ace_type = ace_flags = ace_mask = len = 0;
5391 nfsm_chain_get_32(error, &nmrep, ace_type);
5392 nfsm_chain_get_32(error, &nmrep, ace_flags);
5393 nfsm_chain_get_32(error, &nmrep, ace_mask);
5394 nfsm_chain_get_32(error, &nmrep, len);
5395 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5396 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5397 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5398 if (!error && (len >= slen)) {
5399 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5400 if (s)
5401 slen = len+1;
5402 else
5403 ace.ace_flags = 0;
5404 }
5405 if (s)
5406 nfsm_chain_get_opaque(error, &nmrep, len, s);
5407 else
5408 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5409 if (!error && s) {
5410 s[len] = '\0';
5411 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5412 ace.ace_flags = 0;
5413 }
5414 if (error || !s)
5415 ace.ace_flags = 0;
5416 if (s && (s != sbuf))
5417 FREE(s, M_TEMP);
5418 if (!error) {
5419 /* stuff the delegation state in the node */
5420 lck_mtx_lock(&np->n_openlock);
5421 np->n_openflags &= ~N_DELEG_MASK;
5422 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5423 np->n_dstateid = dstateid;
5424 np->n_dace = ace;
5425 if (np->n_dlink.tqe_next == NFSNOLIST) {
5426 lck_mtx_lock(&nmp->nm_lock);
5427 if (np->n_dlink.tqe_next == NFSNOLIST)
5428 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5429 lck_mtx_unlock(&nmp->nm_lock);
5430 }
5431 lck_mtx_unlock(&np->n_openlock);
5432 }
5433 break;
5434 default:
5435 error = EBADRPC;
5436 break;
5437 }
5438 nfsmout_if(error);
5439 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5440 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5441 nfsmout_if(error);
5442 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5443 NP(np, "nfs: open reclaim didn't return filehandle?");
5444 error = EBADRPC;
5445 goto nfsmout;
5446 }
5447 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5448 // XXX what if fh doesn't match the vnode we think we're re-opening?
5449 // That should be pretty hard in this case, given that we are doing
5450 // the open reclaim using the file handle (and not a dir/name pair).
5451 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5452 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5453 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5454 }
5455 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5456 nfsmout_if(error);
5457 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5458 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5459 nfsmout:
5460 // if (!error)
5461 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5462 NVATTR_CLEANUP(&nvattr);
5463 nfsm_chain_cleanup(&nmreq);
5464 nfsm_chain_cleanup(&nmrep);
5465 if (!lockerror)
5466 nfs_node_unlock(np);
5467 nfs_open_owner_clear_busy(noop);
5468 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5469 if (recall)
5470 nfs4_delegation_return_enqueue(np);
5471 }
5472 return (error);
5473 }
5474
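/*
 * Send an OPEN_DOWNGRADE RPC to reduce an open's share access/deny modes
 * to those currently recorded in the open file (nof_access/nof_deny).
 * The server returns an updated stateid for the open.
 */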
5475 int
5476 nfs4_open_downgrade_rpc(
5477 nfsnode_t np,
5478 struct nfs_open_file *nofp,
5479 vfs_context_t ctx)
5480 {
5481 struct nfs_open_owner *noop = nofp->nof_owner;
5482 struct nfsmount *nmp;
5483 int error, lockerror = ENOENT, status, nfsvers, numops;
5484 struct nfsm_chain nmreq, nmrep;
5485 u_int64_t xid;
5486 struct nfsreq_secinfo_args si;
5487
5488 nmp = NFSTONMP(np);
5489 if (nfs_mount_gone(nmp))
5490 return (ENXIO);
5491 nfsvers = nmp->nm_vers;
5492
5493 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5494 return (error);
5495
5496 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5497 nfsm_chain_null(&nmreq);
5498 nfsm_chain_null(&nmrep);
5499
5500 // PUTFH, OPEN_DOWNGRADE, GETATTR
5501 numops = 3;
5502 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5503 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5504 numops--;
5505 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5506 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5507 numops--;
5508 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5509 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5510 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5511 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5512 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5513 numops--;
5514 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5515 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5516 nfsm_chain_build_done(error, &nmreq);
5517 nfsm_assert(error, (numops == 0), EPROTO);
5518 nfsmout_if(error);
5519 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5520 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5521 &si, R_NOINTR, &nmrep, &xid, &status);
5522
5523 if ((lockerror = nfs_node_lock(np)))
5524 error = lockerror;
5525 nfsm_chain_skip_tag(error, &nmrep);
5526 nfsm_chain_get_32(error, &nmrep, numops);
5527 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5528 nfsmout_if(error);
5529 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
5530 nfs_owner_seqid_increment(noop, NULL, error);
5531 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5532 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5533 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5534 nfsmout:
5535 if (!lockerror)
5536 nfs_node_unlock(np);
5537 nfs_open_owner_clear_busy(noop);
5538 nfsm_chain_cleanup(&nmreq);
5539 nfsm_chain_cleanup(&nmrep);
5540 return (error);
5541 }
5542
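/*
 * Send a CLOSE RPC to release the open file's stateid on the server.
 * Like the other open-owner operations, this bumps the owner's seqid
 * and stores the stateid returned in the reply.
 */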
5543 int
5544 nfs4_close_rpc(
5545 nfsnode_t np,
5546 struct nfs_open_file *nofp,
5547 thread_t thd,
5548 kauth_cred_t cred,
5549 int flags)
5550 {
5551 struct nfs_open_owner *noop = nofp->nof_owner;
5552 struct nfsmount *nmp;
5553 int error, lockerror = ENOENT, status, nfsvers, numops;
5554 struct nfsm_chain nmreq, nmrep;
5555 u_int64_t xid;
5556 struct nfsreq_secinfo_args si;
5557
5558 nmp = NFSTONMP(np);
5559 if (nfs_mount_gone(nmp))
5560 return (ENXIO);
5561 nfsvers = nmp->nm_vers;
5562
5563 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5564 return (error);
5565
5566 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5567 nfsm_chain_null(&nmreq);
5568 nfsm_chain_null(&nmrep);
5569
5570 // PUTFH, CLOSE, GETATTR
5571 numops = 3;
5572 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5573 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
5574 numops--;
5575 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5576 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5577 numops--;
5578 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
5579 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5580 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5581 numops--;
5582 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5583 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5584 nfsm_chain_build_done(error, &nmreq);
5585 nfsm_assert(error, (numops == 0), EPROTO);
5586 nfsmout_if(error);
5587 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5588
5589 if ((lockerror = nfs_node_lock(np)))
5590 error = lockerror;
5591 nfsm_chain_skip_tag(error, &nmrep);
5592 nfsm_chain_get_32(error, &nmrep, numops);
5593 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5594 nfsmout_if(error);
5595 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
5596 nfs_owner_seqid_increment(noop, NULL, error);
5597 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5598 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5599 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5600 nfsmout:
5601 if (!lockerror)
5602 nfs_node_unlock(np);
5603 nfs_open_owner_clear_busy(noop);
5604 nfsm_chain_cleanup(&nmreq);
5605 nfsm_chain_cleanup(&nmrep);
5606 return (error);
5607 }
5608
5609
5610 /*
5611 * Claim the delegated open combinations this open file holds.
5612 */
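/*
 * The claims below run from the most restrictive deny modes down to the
 * plain deny-none opens: rw/deny-both first, then write and read with
 * deny-both, the deny-write combinations, and finally rw/w/r with no
 * denial.  Only the deny-none opens may fall back to a simple reopen on
 * a lost delegation, and then only if no locks are held (see the
 * nfs_check_for_locks() check at the end).
 */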
5613 int
5614 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
5615 {
5616 struct nfs_open_owner *noop = nofp->nof_owner;
5617 struct nfs_lock_owner *nlop;
5618 struct nfs_file_lock *nflp, *nextnflp;
5619 struct nfsmount *nmp;
5620 int error = 0, reopen = 0;
5621
5622 if (nofp->nof_d_rw_drw) {
5623 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
5624 if (!error) {
5625 lck_mtx_lock(&nofp->nof_lock);
5626 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
5627 nofp->nof_d_rw_drw = 0;
5628 lck_mtx_unlock(&nofp->nof_lock);
5629 }
5630 }
5631 if (!error && nofp->nof_d_w_drw) {
5632 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
5633 if (!error) {
5634 lck_mtx_lock(&nofp->nof_lock);
5635 nofp->nof_w_drw += nofp->nof_d_w_drw;
5636 nofp->nof_d_w_drw = 0;
5637 lck_mtx_unlock(&nofp->nof_lock);
5638 }
5639 }
5640 if (!error && nofp->nof_d_r_drw) {
5641 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
5642 if (!error) {
5643 lck_mtx_lock(&nofp->nof_lock);
5644 nofp->nof_r_drw += nofp->nof_d_r_drw;
5645 nofp->nof_d_r_drw = 0;
5646 lck_mtx_unlock(&nofp->nof_lock);
5647 }
5648 }
5649 if (!error && nofp->nof_d_rw_dw) {
5650 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
5651 if (!error) {
5652 lck_mtx_lock(&nofp->nof_lock);
5653 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
5654 nofp->nof_d_rw_dw = 0;
5655 lck_mtx_unlock(&nofp->nof_lock);
5656 }
5657 }
5658 if (!error && nofp->nof_d_w_dw) {
5659 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
5660 if (!error) {
5661 lck_mtx_lock(&nofp->nof_lock);
5662 nofp->nof_w_dw += nofp->nof_d_w_dw;
5663 nofp->nof_d_w_dw = 0;
5664 lck_mtx_unlock(&nofp->nof_lock);
5665 }
5666 }
5667 if (!error && nofp->nof_d_r_dw) {
5668 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
5669 if (!error) {
5670 lck_mtx_lock(&nofp->nof_lock);
5671 nofp->nof_r_dw += nofp->nof_d_r_dw;
5672 nofp->nof_d_r_dw = 0;
5673 lck_mtx_unlock(&nofp->nof_lock);
5674 }
5675 }
5676 /* non-deny-mode opens may be reopened if no locks are held */
5677 if (!error && nofp->nof_d_rw) {
5678 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
5679 /* for some errors, we should just try reopening the file */
5680 if (nfs_mount_state_error_delegation_lost(error))
5681 reopen = error;
5682 if (!error || reopen) {
5683 lck_mtx_lock(&nofp->nof_lock);
5684 nofp->nof_rw += nofp->nof_d_rw;
5685 nofp->nof_d_rw = 0;
5686 lck_mtx_unlock(&nofp->nof_lock);
5687 }
5688 }
5689 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5690 if ((!error || reopen) && nofp->nof_d_w) {
5691 if (!error) {
5692 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
5693 /* for some errors, we should just try reopening the file */
5694 if (nfs_mount_state_error_delegation_lost(error))
5695 reopen = error;
5696 }
5697 if (!error || reopen) {
5698 lck_mtx_lock(&nofp->nof_lock);
5699 nofp->nof_w += nofp->nof_d_w;
5700 nofp->nof_d_w = 0;
5701 lck_mtx_unlock(&nofp->nof_lock);
5702 }
5703 }
5704 if ((!error || reopen) && nofp->nof_d_r) {
5705 if (!error) {
5706 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
5707 /* for some errors, we should just try reopening the file */
5708 if (nfs_mount_state_error_delegation_lost(error))
5709 reopen = error;
5710 }
5711 if (!error || reopen) {
5712 lck_mtx_lock(&nofp->nof_lock);
5713 nofp->nof_r += nofp->nof_d_r;
5714 nofp->nof_d_r = 0;
5715 lck_mtx_unlock(&nofp->nof_lock);
5716 }
5717 }
5718
5719 if (reopen) {
5720 /*
5721 * Any problems with the delegation probably indicate that we
5722 * should review/return all of our current delegation state.
5723 */
5724 if ((nmp = NFSTONMP(nofp->nof_np))) {
5725 nfs4_delegation_return_enqueue(nofp->nof_np);
5726 lck_mtx_lock(&nmp->nm_lock);
5727 nfs_need_recover(nmp, NFSERR_EXPIRED);
5728 lck_mtx_unlock(&nmp->nm_lock);
5729 }
5730 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
5731 /* just reopen the file on next access */
5732 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5733 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5734 lck_mtx_lock(&nofp->nof_lock);
5735 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
5736 lck_mtx_unlock(&nofp->nof_lock);
5737 return (0);
5738 }
5739 if (reopen)
5740 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5741 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5742 }
5743
5744 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
5745 /* claim delegated locks */
5746 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
5747 if (nlop->nlo_open_owner != noop)
5748 continue;
5749 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
5750 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5751 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5752 continue;
5753 /* skip non-delegated locks */
5754 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5755 continue;
5756 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
5757 if (error) {
5758 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5759 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5760 break;
5761 }
5762 // else {
5763 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5764 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5765 // }
5766 }
5767 if (error)
5768 break;
5769 }
5770 }
5771
5772 if (!error) /* all state claimed successfully! */
5773 return (0);
5774
5775 /* restart if it looks like a problem more than just losing the delegation */
5776 if (!nfs_mount_state_error_delegation_lost(error) &&
5777 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
5778 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5779 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np))))
5780 nfs_need_reconnect(nmp);
5781 return (error);
5782 }
5783
5784 /* delegated state lost (once held but now not claimable) */
5785 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5786
5787 /*
5788 * Any problems with the delegation probably indicate that we
5789 * should review/return all of our current delegation state.
5790 */
5791 if ((nmp = NFSTONMP(nofp->nof_np))) {
5792 nfs4_delegation_return_enqueue(nofp->nof_np);
5793 lck_mtx_lock(&nmp->nm_lock);
5794 nfs_need_recover(nmp, NFSERR_EXPIRED);
5795 lck_mtx_unlock(&nmp->nm_lock);
5796 }
5797
5798 /* revoke all open file state */
5799 nfs_revoke_open_state_for_node(nofp->nof_np);
5800
5801 return (error);
5802 }
5803
5804 /*
5805 * Release all open state for the given node.
5806 */
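/*
 * If "force" is set (or the mount is gone), no RPCs are sent; locks and
 * opens are simply marked dead/lost locally.  Otherwise each held,
 * non-delegated lock gets an unlock RPC and each open gets a CLOSE
 * (NFSv4 only) before the local state is torn down.
 */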
5807 void
5808 nfs_release_open_state_for_node(nfsnode_t np, int force)
5809 {
5810 struct nfsmount *nmp = NFSTONMP(np);
5811 struct nfs_open_file *nofp;
5812 struct nfs_file_lock *nflp, *nextnflp;
5813
5814 /* drop held locks */
5815 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
5816 /* skip dead & blocked lock requests */
5817 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5818 continue;
5819 /* send an unlock if not a delegated lock */
5820 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5821 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
5822 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
5823 /* kill/remove the lock */
5824 lck_mtx_lock(&np->n_openlock);
5825 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
5826 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
5827 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
5828 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
5829 if (nflp->nfl_blockcnt) {
5830 /* wake up anyone blocked on this lock */
5831 wakeup(nflp);
5832 } else {
5833 /* remove nflp from lock list and destroy */
5834 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
5835 nfs_file_lock_destroy(nflp);
5836 }
5837 lck_mtx_unlock(&np->n_openlock);
5838 }
5839
5840 lck_mtx_lock(&np->n_openlock);
5841
5842 /* drop all opens */
5843 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5844 if (nofp->nof_flags & NFS_OPEN_FILE_LOST)
5845 continue;
5846 /* mark open state as lost */
5847 lck_mtx_lock(&nofp->nof_lock);
5848 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5849 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
5850
5851 lck_mtx_unlock(&nofp->nof_lock);
5852 if (!force && nmp && (nmp->nm_vers >= NFS_VER4))
5853 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
5854 }
5855
5856 lck_mtx_unlock(&np->n_openlock);
5857 }
5858
5859 /*
5860 * State for a node has been lost, drop it, and revoke the node.
5861 * Attempt to return any state, if possible, in case the server
5862 * might somehow think we hold it.
5863 */
5864 void
5865 nfs_revoke_open_state_for_node(nfsnode_t np)
5866 {
5867 struct nfsmount *nmp;
5868
5869 /* mark node as needing to be revoked */
5870 nfs_node_lock_force(np);
5871 if (np->n_flag & NREVOKE) { /* already revoked? */
5873 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
5874 nfs_node_unlock(np);
5875 return;
5876 }
5877 np->n_flag |= NREVOKE;
5878 nfs_node_unlock(np);
5879
5880 nfs_release_open_state_for_node(np, 0);
5881 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
5882
5883 /* mark mount as needing a revoke scan and have the socket thread do it. */
5884 if ((nmp = NFSTONMP(np))) {
5885 lck_mtx_lock(&nmp->nm_lock);
5886 nmp->nm_state |= NFSSTA_REVOKE;
5887 nfs_mount_sock_thread_wake(nmp);
5888 lck_mtx_unlock(&nmp->nm_lock);
5889 }
5890 }
5891
5892 /*
5893 * Claim the delegated open combinations that each of this node's open files holds.
5894 */
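/*
 * Note the restart pattern below: n_openlock must be dropped around each
 * claim RPC, and the open file list may change while it's unlocked, so
 * after every successful claim the scan restarts from the head.  Already
 * claimed entries have had their nof_d_* counters zeroed, so they're
 * skipped on the next pass and the loop terminates.
 */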
5895 int
5896 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
5897 {
5898 struct nfs_open_file *nofp;
5899 int error = 0;
5900
5901 lck_mtx_lock(&np->n_openlock);
5902
5903 /* walk the open file list looking for opens with delegated state to claim */
5904 restart:
5905 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5906 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
5907 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
5908 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r)
5909 continue;
5910 lck_mtx_unlock(&np->n_openlock);
5911 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
5912 lck_mtx_lock(&np->n_openlock);
5913 if (error)
5914 break;
5915 goto restart;
5916 }
5917
5918 lck_mtx_unlock(&np->n_openlock);
5919
5920 return (error);
5921 }
5922
5923 /*
5924 * Mark a node as needed to have its delegation returned.
5925 * Queue it up on the delegation return queue.
5926 * Make sure the thread is running.
5927 */
5928 void
5929 nfs4_delegation_return_enqueue(nfsnode_t np)
5930 {
5931 struct nfsmount *nmp;
5932
5933 nmp = NFSTONMP(np);
5934 if (nfs_mount_gone(nmp))
5935 return;
5936
5937 lck_mtx_lock(&np->n_openlock);
5938 np->n_openflags |= N_DELEG_RETURN;
5939 lck_mtx_unlock(&np->n_openlock);
5940
5941 lck_mtx_lock(&nmp->nm_lock);
5942 if (np->n_dreturn.tqe_next == NFSNOLIST)
5943 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
5944 nfs_mount_sock_thread_wake(nmp);
5945 lck_mtx_unlock(&nmp->nm_lock);
5946 }
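/*
 * The queue is consumed elsewhere (the socket thread is woken above).
 * A minimal sketch of what draining nm_dreturnq could look like --
 * illustrative only, not the actual socket-thread code; "thd"/"cred"
 * stand in for whatever context the consumer runs with:
 *
 *	lck_mtx_lock(&nmp->nm_lock);
 *	while ((np = TAILQ_FIRST(&nmp->nm_dreturnq))) {
 *		lck_mtx_unlock(&nmp->nm_lock);
 *		nfs4_delegation_return(np, 0, thd, cred); // dequeues np itself
 *		lck_mtx_lock(&nmp->nm_lock);
 *	}
 *	lck_mtx_unlock(&nmp->nm_lock);
 */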
5947
5948 /*
5949 * return any delegation we may have for the given node
5950 */
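/*
 * The sequence is: mark the node N_DELEG_RETURN|N_DELEG_RETURNING, busy
 * the open state so nobody else uses the delegation, claim all delegated
 * opens/locks on the server first (so no state is lost along with the
 * delegation), then send DELEGRETURN.  Any error other than a timeout or
 * NFSERR_*MOVED is assumed to mean the delegation is gone, so the local
 * delegation state is dropped.
 */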
5951 int
5952 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
5953 {
5954 struct nfsmount *nmp;
5955 fhandle_t fh;
5956 nfs_stateid dstateid;
5957 int error;
5958
5959 nmp = NFSTONMP(np);
5960 if (nfs_mount_gone(nmp))
5961 return (ENXIO);
5962
5963 /* first, make sure the node's marked for delegation return */
5964 lck_mtx_lock(&np->n_openlock);
5965 np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING);
5966 lck_mtx_unlock(&np->n_openlock);
5967
5968 /* make sure nobody else is using the delegation state */
5969 if ((error = nfs_open_state_set_busy(np, NULL)))
5970 goto out;
5971
5972 /* claim any delegated state */
5973 if ((error = nfs4_claim_delegated_state_for_node(np, flags)))
5974 goto out;
5975
5976 /* return the delegation */
5977 lck_mtx_lock(&np->n_openlock);
5978 dstateid = np->n_dstateid;
5979 fh.fh_len = np->n_fhsize;
5980 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
5981 lck_mtx_unlock(&np->n_openlock);
5982 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
5983 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5984 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
5985 lck_mtx_lock(&np->n_openlock);
5986 np->n_openflags &= ~N_DELEG_MASK;
5987 lck_mtx_lock(&nmp->nm_lock);
5988 if (np->n_dlink.tqe_next != NFSNOLIST) {
5989 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
5990 np->n_dlink.tqe_next = NFSNOLIST;
5991 }
5992 lck_mtx_unlock(&nmp->nm_lock);
5993 lck_mtx_unlock(&np->n_openlock);
5994 }
5995
5996 out:
5997 /* make sure it's no longer on the return queue and clear the return flags */
5998 lck_mtx_lock(&nmp->nm_lock);
5999 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6000 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6001 np->n_dreturn.tqe_next = NFSNOLIST;
6002 }
6003 lck_mtx_unlock(&nmp->nm_lock);
6004 lck_mtx_lock(&np->n_openlock);
6005 np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING);
6006 lck_mtx_unlock(&np->n_openlock);
6007
6008 if (error) {
6009 NP(np, "nfs4_delegation_return, error %d", error);
6010 if (error == ETIMEDOUT)
6011 nfs_need_reconnect(nmp);
6012 if (nfs_mount_state_error_should_restart(error)) {
6013 /* make sure recovery happens */
6014 lck_mtx_lock(&nmp->nm_lock);
6015 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6016 lck_mtx_unlock(&nmp->nm_lock);
6017 }
6018 }
6019
6020 nfs_open_state_clear_busy(np);
6021
6022 return (error);
6023 }
6024
6025 /*
6026 * RPC to return a delegation for a file handle
6027 */
6028 int
6029 nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6030 {
6031 int error = 0, status, numops;
6032 uint64_t xid;
6033 struct nfsm_chain nmreq, nmrep;
6034 struct nfsreq_secinfo_args si;
6035
6036 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6037 nfsm_chain_null(&nmreq);
6038 nfsm_chain_null(&nmrep);
6039
6040 // PUTFH, DELEGRETURN
6041 numops = 2;
6042 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6043 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6044 numops--;
6045 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6046 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6047 numops--;
6048 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6049 nfsm_chain_add_stateid(error, &nmreq, sid);
6050 nfsm_chain_build_done(error, &nmreq);
6051 nfsm_assert(error, (numops == 0), EPROTO);
6052 nfsmout_if(error);
6053 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6054 nfsm_chain_skip_tag(error, &nmrep);
6055 nfsm_chain_get_32(error, &nmrep, numops);
6056 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6057 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6058 nfsmout:
6059 nfsm_chain_cleanup(&nmreq);
6060 nfsm_chain_cleanup(&nmrep);
6061 return (error);
6062 }
6063
6064
6065 /*
6066 * NFS read call.
6067 * Just call nfs_bioread() to do the work.
6068 *
6069 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6070 * without first calling VNOP_OPEN, so we make sure the file is open here.
6071 */
6072 int
6073 nfs_vnop_read(
6074 struct vnop_read_args /* {
6075 struct vnodeop_desc *a_desc;
6076 vnode_t a_vp;
6077 struct uio *a_uio;
6078 int a_ioflag;
6079 vfs_context_t a_context;
6080 } */ *ap)
6081 {
6082 vnode_t vp = ap->a_vp;
6083 vfs_context_t ctx = ap->a_context;
6084 nfsnode_t np;
6085 struct nfsmount *nmp;
6086 struct nfs_open_owner *noop;
6087 struct nfs_open_file *nofp;
6088 int error;
6089
6090 if (vnode_vtype(ap->a_vp) != VREG)
6091 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6092
6093 np = VTONFS(vp);
6094 nmp = NFSTONMP(np);
6095 if (nfs_mount_gone(nmp))
6096 return (ENXIO);
6097 if (np->n_flag & NREVOKE)
6098 return (EIO);
6099
6100 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6101 if (!noop)
6102 return (ENOMEM);
6103 restart:
6104 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6105 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6106 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6107 error = EIO;
6108 }
6109 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6110 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6111 nofp = NULL;
6112 if (!error)
6113 goto restart;
6114 }
6115 if (error) {
6116 nfs_open_owner_rele(noop);
6117 return (error);
6118 }
6119 /*
6120 * Since the read path is a hot path, if we already have
6121 * read access, let's go ahead and try the read, without
6122 * busying the mount and open file node for this open owner.
6123 *
6124 * N.B. This is inherently racy w.r.t. an execve using
6125 * an already open file, in that the read at the end of
6126 * this routine will be racing with a potential close.
6127 * The code below ultimately has the same problem. In practice
6128 * this does not seem to be an issue.
6129 */
6130 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6131 nfs_open_owner_rele(noop);
6132 goto do_read;
6133 }
6134 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6135 if (error) {
6136 nfs_open_owner_rele(noop);
6137 return (error);
6138 }
6139 /*
6140 * If we don't have a file already open with the access we need (read) then
6141 * we need to open one. Otherwise we just co-opt an open. We might not already
6142 * have access because we're trying to read the first page of the
6143 * file for execve.
6144 */
6145 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6146 if (error) {
6147 nfs_mount_state_in_use_end(nmp, 0);
6148 nfs_open_owner_rele(noop);
6149 return (error);
6150 }
6151 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6152 /* we don't have the file open, so open it for read access if we're not denied */
6153 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6154 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6155 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6156 }
6157 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6158 nfs_open_file_clear_busy(nofp);
6159 nfs_mount_state_in_use_end(nmp, 0);
6160 nfs_open_owner_rele(noop);
6161 return (EPERM);
6162 }
6163 if (np->n_flag & NREVOKE) {
6164 error = EIO;
6165 nfs_open_file_clear_busy(nofp);
6166 nfs_mount_state_in_use_end(nmp, 0);
6167 nfs_open_owner_rele(noop);
6168 return (error);
6169 }
6170 if (nmp->nm_vers < NFS_VER4) {
6171 /* NFS v2/v3 opens are always allowed - so just add it. */
6172 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6173 } else {
6174 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6175 }
6176 if (!error)
6177 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6178 }
6179 if (nofp)
6180 nfs_open_file_clear_busy(nofp);
6181 if (nfs_mount_state_in_use_end(nmp, error)) {
6182 nofp = NULL;
6183 goto restart;
6184 }
6185 nfs_open_owner_rele(noop);
6186 if (error)
6187 return (error);
6188 do_read:
6189 return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
6190 }
6191
6192 /*
6193 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6194 * Files are created using the NFSv4 OPEN RPC. So we must open the
6195 * file to create it and then close it.
6196 */
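/*
 * So the create below is done with an OPEN RPC (NFS_OPEN_CREATE) asking
 * for read/write access with no denial, and the resulting open file is
 * tagged NFS_OPEN_FILE_CREATE with nof_creator set, so a later close can
 * recognize the open that exists only because of the create.
 */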
6197 int
6198 nfs4_vnop_create(
6199 struct vnop_create_args /* {
6200 struct vnodeop_desc *a_desc;
6201 vnode_t a_dvp;
6202 vnode_t *a_vpp;
6203 struct componentname *a_cnp;
6204 struct vnode_attr *a_vap;
6205 vfs_context_t a_context;
6206 } */ *ap)
6207 {
6208 vfs_context_t ctx = ap->a_context;
6209 struct componentname *cnp = ap->a_cnp;
6210 struct vnode_attr *vap = ap->a_vap;
6211 vnode_t dvp = ap->a_dvp;
6212 vnode_t *vpp = ap->a_vpp;
6213 struct nfsmount *nmp;
6214 nfsnode_t np;
6215 int error = 0, busyerror = 0, accessMode, denyMode;
6216 struct nfs_open_owner *noop = NULL;
6217 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6218
6219 nmp = VTONMP(dvp);
6220 if (nfs_mount_gone(nmp))
6221 return (ENXIO);
6222
6223 if (vap)
6224 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6225
6226 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6227 if (!noop)
6228 return (ENOMEM);
6229
6230 restart:
6231 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6232 if (error) {
6233 nfs_open_owner_rele(noop);
6234 return (error);
6235 }
6236
6237 /* grab a provisional, nodeless open file */
6238 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6239 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6240 printf("nfs_vnop_create: LOST\n");
6241 error = EIO;
6242 }
6243 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6244 /* This shouldn't happen given that this is a new, nodeless nofp */
6245 nfs_mount_state_in_use_end(nmp, 0);
6246 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6247 nfs_open_file_destroy(newnofp);
6248 newnofp = NULL;
6249 if (!error)
6250 goto restart;
6251 }
6252 if (!error)
6253 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6254 if (error) {
6255 if (newnofp)
6256 nfs_open_file_destroy(newnofp);
6257 newnofp = NULL;
6258 goto out;
6259 }
6260
6261 /*
6262 * We're just trying to create the file.
6263 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6264 */
6265 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6266 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6267
6268 /* Do the open/create */
6269 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6270 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6271 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6272 /*
6273 * Hmm... it looks like we may have a situation where the request was
6274 * retransmitted: we never saw the first reply, which successfully
6275 * created/opened the file, and the retried open was then denied because
6276 * the mode the file was created with doesn't allow write access.
6277 *
6278 * We'll try to work around this by temporarily updating the mode and
6279 * retrying the open.
6280 */
6281 struct vnode_attr vattr;
6282
6283 /* first make sure it's there */
6284 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6285 if (!error2 && np) {
6286 nfs_node_unlock(np);
6287 *vpp = NFSTOV(np);
6288 if (vnode_vtype(NFSTOV(np)) == VREG) {
6289 VATTR_INIT(&vattr);
6290 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6291 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6292 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6293 VATTR_INIT(&vattr);
6294 VATTR_SET(&vattr, va_mode, vap->va_mode);
6295 nfs4_setattr_rpc(np, &vattr, ctx);
6296 if (!error2)
6297 error = 0;
6298 }
6299 }
6300 if (error) {
6301 vnode_put(*vpp);
6302 *vpp = NULL;
6303 }
6304 }
6305 }
6306 if (!error && !*vpp) {
6307 printf("nfs4_open_rpc returned without a node?\n");
6308 /* Hmmm... with no node, we have no filehandle and can't close it */
6309 error = EIO;
6310 }
6311 if (error) {
6312 /* need to cleanup our temporary nofp */
6313 nfs_open_file_clear_busy(newnofp);
6314 nfs_open_file_destroy(newnofp);
6315 newnofp = NULL;
6316 goto out;
6317 }
6318 /* After we have a node, add our open file struct to the node */
6319 np = VTONFS(*vpp);
6320 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6321 nofp = newnofp;
6322 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6323 if (error) {
6324 /* This shouldn't happen, because we passed in a new nofp to use. */
6325 printf("nfs_open_file_find_internal failed! %d\n", error);
6326 goto out;
6327 } else if (nofp != newnofp) {
6328 /*
6329 * Hmm... an open file struct already exists.
6330 * Mark the existing one busy and merge our open into it.
6331 * Then destroy the one we created.
6332 * Note: there's no chance of an open conflict because the
6333 * open has already been granted.
6334 */
6335 busyerror = nfs_open_file_set_busy(nofp, NULL);
6336 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6337 nofp->nof_stateid = newnofp->nof_stateid;
6338 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
6339 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6340 nfs_open_file_clear_busy(newnofp);
6341 nfs_open_file_destroy(newnofp);
6342 }
6343 newnofp = NULL;
6344 /* mark the node as holding a create-initiated open */
6345 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6346 nofp->nof_creator = current_thread();
6347 out:
6348 if (nofp && !busyerror)
6349 nfs_open_file_clear_busy(nofp);
6350 if (nfs_mount_state_in_use_end(nmp, error)) {
6351 nofp = newnofp = NULL;
6352 busyerror = 0;
6353 goto restart;
6354 }
6355 if (noop)
6356 nfs_open_owner_rele(noop);
6357 return (error);
6358 }
6359
6360 /*
6361 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6362 */
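/*
 * The compound sent is PUTFH(dir), SAVEFH, CREATE, GETATTR (on the new
 * object, with the filehandle requested), RESTOREFH, GETATTR(dir).
 * While the request is outstanding, and when the server lacks named
 * attribute support, an nfs_dulookup is started; that appears to kick
 * off a parallel lookup of the corresponding AppleDouble ("._") sibling
 * name to hide its latency.
 */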
6363 int
6364 nfs4_create_rpc(
6365 vfs_context_t ctx,
6366 nfsnode_t dnp,
6367 struct componentname *cnp,
6368 struct vnode_attr *vap,
6369 int type,
6370 char *link,
6371 nfsnode_t *npp)
6372 {
6373 struct nfsmount *nmp;
6374 struct nfs_vattr nvattr;
6375 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6376 int nfsvers, namedattrs, numops;
6377 u_int64_t xid, savedxid = 0;
6378 nfsnode_t np = NULL;
6379 vnode_t newvp = NULL;
6380 struct nfsm_chain nmreq, nmrep;
6381 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6382 const char *tag;
6383 nfs_specdata sd;
6384 fhandle_t fh;
6385 struct nfsreq rq, *req = &rq;
6386 struct nfs_dulookup dul;
6387 struct nfsreq_secinfo_args si;
6388
6389 nmp = NFSTONMP(dnp);
6390 if (nfs_mount_gone(nmp))
6391 return (ENXIO);
6392 nfsvers = nmp->nm_vers;
6393 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6394 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6395 return (EINVAL);
6396
6397 sd.specdata1 = sd.specdata2 = 0;
6398
6399 switch (type) {
6400 case NFLNK:
6401 tag = "symlink";
6402 break;
6403 case NFBLK:
6404 case NFCHR:
6405 tag = "mknod";
6406 if (!VATTR_IS_ACTIVE(vap, va_rdev))
6407 return (EINVAL);
6408 sd.specdata1 = major(vap->va_rdev);
6409 sd.specdata2 = minor(vap->va_rdev);
6410 break;
6411 case NFSOCK:
6412 case NFFIFO:
6413 tag = "mknod";
6414 break;
6415 case NFDIR:
6416 tag = "mkdir";
6417 break;
6418 default:
6419 return (EINVAL);
6420 }
6421
6422 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6423
6424 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6425 if (!namedattrs)
6426 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6427
6428 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6429 NVATTR_INIT(&nvattr);
6430 nfsm_chain_null(&nmreq);
6431 nfsm_chain_null(&nmrep);
6432
6433 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6434 numops = 6;
6435 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6436 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6437 numops--;
6438 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6439 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6440 numops--;
6441 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6442 numops--;
6443 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6444 nfsm_chain_add_32(error, &nmreq, type);
6445 if (type == NFLNK) {
6446 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6447 } else if ((type == NFBLK) || (type == NFCHR)) {
6448 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6449 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6450 }
6451 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6452 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6453 numops--;
6454 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6455 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6456 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6457 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6458 numops--;
6459 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6460 numops--;
6461 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6462 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6463 nfsm_chain_build_done(error, &nmreq);
6464 nfsm_assert(error, (numops == 0), EPROTO);
6465 nfsmout_if(error);
6466
6467 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6468 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6469 if (!error) {
6470 if (!namedattrs)
6471 nfs_dulookup_start(&dul, dnp, ctx);
6472 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6473 }
6474
6475 if ((lockerror = nfs_node_lock(dnp)))
6476 error = lockerror;
6477 nfsm_chain_skip_tag(error, &nmrep);
6478 nfsm_chain_get_32(error, &nmrep, numops);
6479 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6480 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6481 nfsmout_if(error);
6482 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6483 nfsm_chain_check_change_info(error, &nmrep, dnp);
6484 bmlen = NFS_ATTR_BITMAP_LEN;
6485 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6486 /* At this point, if we have no error, the object was created. */
6487 /* If we don't get attributes, then we should look it up (nfs_lookitup below). */
6488 create_error = error;
6489 nfsmout_if(error);
6490 nfs_vattr_set_supported(bitmap, vap);
6491 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6492 nfsmout_if(error);
6493 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6494 nfsmout_if(error);
6495 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6496 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6497 error = EBADRPC;
6498 goto nfsmout;
6499 }
6500 /* directory attributes: if we don't get them, make sure to invalidate */
6501 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6502 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6503 savedxid = xid;
6504 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
6505 if (error)
6506 NATTRINVALIDATE(dnp);
6507
6508 nfsmout:
6509 nfsm_chain_cleanup(&nmreq);
6510 nfsm_chain_cleanup(&nmrep);
6511
6512 if (!lockerror) {
6513 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
6514 dnp->n_flag &= ~NNEGNCENTRIES;
6515 cache_purge_negatives(NFSTOV(dnp));
6516 }
6517 dnp->n_flag |= NMODIFIED;
6518 nfs_node_unlock(dnp);
6519 /* nfs_getattr() will check changed and purge caches */
6520 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6521 }
6522
6523 if (!error && fh.fh_len) {
6524 /* create the vnode with the filehandle and attributes */
6525 xid = savedxid;
6526 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6527 if (!error)
6528 newvp = NFSTOV(np);
6529 }
6530 NVATTR_CLEANUP(&nvattr);
6531
6532 if (!namedattrs)
6533 nfs_dulookup_finish(&dul, dnp, ctx);
6534
6535 /*
6536 * Kludge: map EEXIST => 0, on the assumption that the reply is to a
6537 * retransmitted request, provided we can successfully look up the object.
6538 */
6539 if ((create_error == EEXIST) || (!create_error && !newvp)) {
6540 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6541 if (!error) {
6542 newvp = NFSTOV(np);
6543 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers))
6544 error = EEXIST;
6545 }
6546 }
6547 if (!busyerror)
6548 nfs_node_clear_busy(dnp);
6549 if (error) {
6550 if (newvp) {
6551 nfs_node_unlock(np);
6552 vnode_put(newvp);
6553 }
6554 } else {
6555 nfs_node_unlock(np);
6556 *npp = np;
6557 }
6558 return (error);
6559 }
6560
6561 int
6562 nfs4_vnop_mknod(
6563 struct vnop_mknod_args /* {
6564 struct vnodeop_desc *a_desc;
6565 vnode_t a_dvp;
6566 vnode_t *a_vpp;
6567 struct componentname *a_cnp;
6568 struct vnode_attr *a_vap;
6569 vfs_context_t a_context;
6570 } */ *ap)
6571 {
6572 nfsnode_t np = NULL;
6573 struct nfsmount *nmp;
6574 int error;
6575
6576 nmp = VTONMP(ap->a_dvp);
6577 if (nfs_mount_gone(nmp))
6578 return (ENXIO);
6579
6580 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
6581 return (EINVAL);
6582 switch (ap->a_vap->va_type) {
6583 case VBLK:
6584 case VCHR:
6585 case VFIFO:
6586 case VSOCK:
6587 break;
6588 default:
6589 return (ENOTSUP);
6590 }
6591
6592 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6593 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
6594 if (!error)
6595 *ap->a_vpp = NFSTOV(np);
6596 return (error);
6597 }
6598
6599 int
6600 nfs4_vnop_mkdir(
6601 struct vnop_mkdir_args /* {
6602 struct vnodeop_desc *a_desc;
6603 vnode_t a_dvp;
6604 vnode_t *a_vpp;
6605 struct componentname *a_cnp;
6606 struct vnode_attr *a_vap;
6607 vfs_context_t a_context;
6608 } */ *ap)
6609 {
6610 nfsnode_t np = NULL;
6611 int error;
6612
6613 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6614 NFDIR, NULL, &np);
6615 if (!error)
6616 *ap->a_vpp = NFSTOV(np);
6617 return (error);
6618 }
6619
6620 int
6621 nfs4_vnop_symlink(
6622 struct vnop_symlink_args /* {
6623 struct vnodeop_desc *a_desc;
6624 vnode_t a_dvp;
6625 vnode_t *a_vpp;
6626 struct componentname *a_cnp;
6627 struct vnode_attr *a_vap;
6628 char *a_target;
6629 vfs_context_t a_context;
6630 } */ *ap)
6631 {
6632 nfsnode_t np = NULL;
6633 int error;
6634
6635 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6636 NFLNK, ap->a_target, &np);
6637 if (!error)
6638 *ap->a_vpp = NFSTOV(np);
6639 return (error);
6640 }
6641
6642 int
6643 nfs4_vnop_link(
6644 struct vnop_link_args /* {
6645 struct vnodeop_desc *a_desc;
6646 vnode_t a_vp;
6647 vnode_t a_tdvp;
6648 struct componentname *a_cnp;
6649 vfs_context_t a_context;
6650 } */ *ap)
6651 {
6652 vfs_context_t ctx = ap->a_context;
6653 vnode_t vp = ap->a_vp;
6654 vnode_t tdvp = ap->a_tdvp;
6655 struct componentname *cnp = ap->a_cnp;
6656 int error = 0, lockerror = ENOENT, status;
6657 struct nfsmount *nmp;
6658 nfsnode_t np = VTONFS(vp);
6659 nfsnode_t tdnp = VTONFS(tdvp);
6660 int nfsvers, numops;
6661 u_int64_t xid, savedxid;
6662 struct nfsm_chain nmreq, nmrep;
6663 struct nfsreq_secinfo_args si;
6664
6665 if (vnode_mount(vp) != vnode_mount(tdvp))
6666 return (EXDEV);
6667
6668 nmp = VTONMP(vp);
6669 if (nfs_mount_gone(nmp))
6670 return (ENXIO);
6671 nfsvers = nmp->nm_vers;
6672 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6673 return (EINVAL);
6674 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6675 return (EINVAL);
6676
6677 /*
6678 * Push all writes to the server, so that the attribute cache
6679 * doesn't get "out of sync" with the server.
6680 * XXX There should be a better way!
6681 */
6682 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
6683
6684 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
6685 return (error);
6686
6687 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6688 nfsm_chain_null(&nmreq);
6689 nfsm_chain_null(&nmrep);
6690
6691 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
6692 numops = 7;
6693 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
6694 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6695 numops--;
6696 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6697 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6698 numops--;
6699 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6700 numops--;
6701 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6702 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
6703 numops--;
6704 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
6705 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6706 numops--;
6707 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6708 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
6709 numops--;
6710 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6711 numops--;
6712 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6713 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6714 nfsm_chain_build_done(error, &nmreq);
6715 nfsm_assert(error, (numops == 0), EPROTO);
6716 nfsmout_if(error);
6717 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
6718
6719 if ((lockerror = nfs_node_lock2(tdnp, np))) {
6720 error = lockerror;
6721 goto nfsmout;
6722 }
6723 nfsm_chain_skip_tag(error, &nmrep);
6724 nfsm_chain_get_32(error, &nmrep, numops);
6725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6726 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6727 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6728 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
6729 nfsm_chain_check_change_info(error, &nmrep, tdnp);
6730 /* directory attributes: if we don't get them, make sure to invalidate */
6731 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6732 savedxid = xid;
6733 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
6734 if (error)
6735 NATTRINVALIDATE(tdnp);
6736 /* link attributes: if we don't get them, make sure to invalidate */
6737 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6738 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6739 xid = savedxid;
6740 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6741 if (error)
6742 NATTRINVALIDATE(np);
6743 nfsmout:
6744 nfsm_chain_cleanup(&nmreq);
6745 nfsm_chain_cleanup(&nmrep);
6746 if (!lockerror)
6747 tdnp->n_flag |= NMODIFIED;
6748 /* Kludge: Map EEXIST => 0, assuming it is the reply to a retransmitted LINK. */
6749 if (error == EEXIST)
6750 error = 0;
6751 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
6752 tdnp->n_flag &= ~NNEGNCENTRIES;
6753 cache_purge_negatives(tdvp);
6754 }
6755 if (!lockerror)
6756 nfs_node_unlock2(tdnp, np);
6757 nfs_node_clear_busy2(tdnp, np);
6758 return (error);
6759 }
6760
6761 int
6762 nfs4_vnop_rmdir(
6763 struct vnop_rmdir_args /* {
6764 struct vnodeop_desc *a_desc;
6765 vnode_t a_dvp;
6766 vnode_t a_vp;
6767 struct componentname *a_cnp;
6768 vfs_context_t a_context;
6769 } */ *ap)
6770 {
6771 vfs_context_t ctx = ap->a_context;
6772 vnode_t vp = ap->a_vp;
6773 vnode_t dvp = ap->a_dvp;
6774 struct componentname *cnp = ap->a_cnp;
6775 struct nfsmount *nmp;
6776 int error = 0, namedattrs;
6777 nfsnode_t np = VTONFS(vp);
6778 nfsnode_t dnp = VTONFS(dvp);
6779 struct nfs_dulookup dul;
6780
6781 if (vnode_vtype(vp) != VDIR)
6782 return (EINVAL);
6783
6784 nmp = NFSTONMP(dnp);
6785 if (nfs_mount_gone(nmp))
6786 return (ENXIO);
6787 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6788
6789 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
6790 return (error);
6791
6792 if (!namedattrs) {
6793 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6794 nfs_dulookup_start(&dul, dnp, ctx);
6795 }
6796
6797 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6798 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6799
6800 nfs_name_cache_purge(dnp, np, cnp, ctx);
6801 /* nfs_getattr() will check changed and purge caches */
6802 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6803 if (!namedattrs)
6804 nfs_dulookup_finish(&dul, dnp, ctx);
6805 nfs_node_clear_busy2(dnp, np);
6806
6807 /*
6808 * Kludge: Map ENOENT => 0, assuming it is the reply to a retransmitted REMOVE.
6809 */
6810 if (error == ENOENT)
6811 error = 0;
6812 if (!error) {
6813 /*
6814 * remove nfsnode from hash now so we can't accidentally find it
6815 * again if another object gets created with the same filehandle
6816 * before this vnode gets reclaimed
6817 */
6818 lck_mtx_lock(nfs_node_hash_mutex);
6819 if (np->n_hflag & NHHASHED) {
6820 LIST_REMOVE(np, n_hash);
6821 np->n_hflag &= ~NHHASHED;
6822 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
6823 }
6824 lck_mtx_unlock(nfs_node_hash_mutex);
6825 }
6826 return (error);
6827 }
6828
6829 /*
6830 * NFSv4 Named Attributes
6831 *
6832 * Both the extended attributes interface and the named streams interface
6833 * are backed by NFSv4 named attributes. The implementations for both use
6834 * a common set of routines in an attempt to reduce code duplication, to
6835 * increase efficiency, to increase caching of both names and data, and to
6836 * confine the complexity.
6837 *
6838 * Each NFS node caches its named attribute directory's file handle.
6839 * The directory nodes for the named attribute directories are handled
6840 * exactly like regular directories (with a couple minor exceptions).
6841 * Named attribute nodes are also treated as much like regular files as
6842 * possible.
6843 *
6844 * Most of the heavy lifting is done by nfs4_named_attr_get().
6845 */
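
/*
 * For orientation, here is a hypothetical caller's-eye view of these
 * routines (an illustrative sketch, not part of the original code;
 * "com.example.tag" and example_read_named_attr() are made up, and the
 * logic simply mirrors nfs4_vnop_getxattr() below):
 */
#if 0
static int
example_read_named_attr(nfsnode_t np, uio_t uio, vfs_context_t ctx)
{
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error;

	/* look up (but don't open) the named attribute "com.example.tag" */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER("com.example.tag", const, char *);
	cn.cn_namelen = strlen("com.example.tag");
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = MAKEENTRY;
	error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
		NFS_GET_NAMED_ATTR_PREFETCH, ctx, &anp, NULL);
	if (!error && !anp)
		error = ENOATTR;
	if (!error)	/* read the attribute data through the buffer cache */
		error = nfs_bioread(anp, uio, 0, ctx);
	if (anp)
		vnode_put(NFSTOV(anp));	/* drop the ref returned with the node */
	return (error);
}
#endif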
6846
6847 /*
6848 * Get the given node's attribute directory node.
6849 * If !fetch, then only return a cached node.
6850 * Otherwise, we will attempt to fetch the node from the server.
6851 * (Note: the node should be marked busy.)
6852 */
6853 nfsnode_t
6854 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
6855 {
6856 nfsnode_t adnp = NULL;
6857 struct nfsmount *nmp;
6858 int error = 0, status, numops;
6859 struct nfsm_chain nmreq, nmrep;
6860 u_int64_t xid;
6861 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
6862 fhandle_t fh;
6863 struct nfs_vattr nvattr;
6864 struct componentname cn;
6865 struct nfsreq rq, *req = &rq;
6866 struct nfsreq_secinfo_args si;
6867
6868 nmp = NFSTONMP(np);
6869 if (nfs_mount_gone(nmp))
6870 return (NULL);
6871 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6872 return (NULL);
6873
6874 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6875 NVATTR_INIT(&nvattr);
6876 nfsm_chain_null(&nmreq);
6877 nfsm_chain_null(&nmrep);
6878
6879 bzero(&cn, sizeof(cn));
6880 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
6881 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
6882 cn.cn_nameiop = LOOKUP;
6883
6884 if (np->n_attrdirfh) {
6885 // XXX can't set parent correctly (to np) yet
6886 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh,
6887 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
6888 if (adnp)
6889 goto nfsmout;
6890 }
6891 if (!fetch) {
6892 error = ENOENT;
6893 goto nfsmout;
6894 }
6895
6896 // PUTFH, OPENATTR, GETATTR
6897 numops = 3;
6898 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
6899 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6900 numops--;
6901 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6902 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
6903 numops--;
6904 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
6905 nfsm_chain_add_32(error, &nmreq, 0);
6906 numops--;
6907 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6908 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6909 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6910 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
6911 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6912 nfsm_chain_build_done(error, &nmreq);
6913 nfsm_assert(error, (numops == 0), EPROTO);
6914 nfsmout_if(error);
6915 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6916 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6917 if (!error)
6918 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6919
6920 nfsm_chain_skip_tag(error, &nmrep);
6921 nfsm_chain_get_32(error, &nmrep, numops);
6922 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6923 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
6924 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6925 nfsmout_if(error);
6926 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6927 nfsmout_if(error);
6928 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
6929 error = ENOENT;
6930 goto nfsmout;
6931 }
6932 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
6933 /* (re)allocate attrdir fh buffer */
6934 if (np->n_attrdirfh)
6935 FREE(np->n_attrdirfh, M_TEMP);
6936 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
6937 }
6938 if (!np->n_attrdirfh) {
6939 error = ENOMEM;
6940 goto nfsmout;
6941 }
6942 /* cache the attrdir fh in the node */
6943 *np->n_attrdirfh = fh.fh_len;
6944 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
6945 /* create node for attrdir */
6946 // XXX can't set parent correctly (to np) yet
6947 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
6948 nfsmout:
6949 NVATTR_CLEANUP(&nvattr);
6950 nfsm_chain_cleanup(&nmreq);
6951 nfsm_chain_cleanup(&nmrep);
6952
6953 if (adnp) {
6954 /* sanity check that this node is an attribute directory */
6955 if (adnp->n_vattr.nva_type != VDIR)
6956 error = EINVAL;
6957 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
6958 error = EINVAL;
6959 nfs_node_unlock(adnp);
6960 if (error)
6961 vnode_put(NFSTOV(adnp));
6962 }
6963 return (error ? NULL : adnp);
6964 }
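
/*
 * Typical use of nfs4_named_attr_dir_get(), as in the remove and listxattr
 * paths below (a sketch only: declarations and error handling are elided,
 * and the node must be marked busy around the call):
 */
#if 0
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
		return (error);
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);	/* fetch == 1: may go to the server */
	nfs_node_clear_busy(np);
	if (!adnp)
		return (ENOENT);
	/* ... use the attribute directory node ... */
	vnode_put(NFSTOV(adnp));	/* the node is returned with a reference held */
#endif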
6965
6966 /*
6967 * Get the given node's named attribute node for the name given.
6968 *
6969 * In an effort to increase the performance of named attribute access, we try
6970 * to reduce server requests by doing the following:
6971 *
6972 * - cache the node's named attribute directory file handle in the node
6973 * - maintain a directory vnode for the attribute directory
6974 * - use name cache entries (positive and negative) to speed up lookups
6975 * - optionally open the named attribute (with the given accessMode) in the same RPC
6976 * - combine attribute directory retrieval with the lookup/open RPC
6977 * - optionally prefetch the named attribute's first block of data in the same RPC
6978 *
6979 * Also, in an attempt to reduce the number of copies/variations of this code,
6980 * parts of the RPC building/processing code are conditionalized on what is
6981 * needed for any particular request (openattr, lookup vs. open, read).
6982 *
6983 * Note that because we may not have the attribute directory node when we start
6984 * the lookup/open, we lock both the node and the attribute directory node.
6985 */
6986
6987 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6988 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6989 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6990 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
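
/*
 * For reference, the xattr vnops below map the VFS-level options onto
 * these flags roughly as follows (a summary of nfs4_vnop_getxattr() and
 * nfs4_vnop_setxattr(), not an independent specification):
 *
 *	XATTR_REPLACE not set		-> NFS_GET_NAMED_ATTR_CREATE
 *	XATTR_CREATE set		-> NFS_GET_NAMED_ATTR_CREATE_GUARDED
 *	not the resource fork (set)	-> NFS_GET_NAMED_ATTR_TRUNCATE
 *	not the resource fork (get)	-> NFS_GET_NAMED_ATTR_PREFETCH
 */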
6991
6992 int
6993 nfs4_named_attr_get(
6994 nfsnode_t np,
6995 struct componentname *cnp,
6996 uint32_t accessMode,
6997 int flags,
6998 vfs_context_t ctx,
6999 nfsnode_t *anpp,
7000 struct nfs_open_file **nofpp)
7001 {
7002 struct nfsmount *nmp;
7003 int error = 0, open_error = EIO;
7004 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7005 int create, guarded, prefetch, truncate, noopbusy = 0;
7006 int open, status, numops, hadattrdir, negnamecache;
7007 struct nfs_vattr nvattr;
7008 struct vnode_attr vattr;
7009 nfsnode_t adnp = NULL, anp = NULL;
7010 vnode_t avp = NULL;
7011 u_int64_t xid, savedxid = 0;
7012 struct nfsm_chain nmreq, nmrep;
7013 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7014 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7015 nfs_stateid stateid, dstateid;
7016 fhandle_t fh;
7017 struct nfs_open_owner *noop = NULL;
7018 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7019 struct vnop_access_args naa;
7020 thread_t thd;
7021 kauth_cred_t cred;
7022 struct timeval now;
7023 char sbuf[64], *s;
7024 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7025 struct kauth_ace ace;
7026 struct nfsreq rq, *req = &rq;
7027 struct nfsreq_secinfo_args si;
7028
7029 *anpp = NULL;
7030 fh.fh_len = 0;
7031 rflags = delegation = recall = eof = rlen = retlen = 0;
7032 ace.ace_flags = 0;
7033 s = sbuf;
7034 slen = sizeof(sbuf);
7035
7036 nmp = NFSTONMP(np);
7037 if (nfs_mount_gone(nmp))
7038 return (ENXIO);
7039 NVATTR_INIT(&nvattr);
7040 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7041 thd = vfs_context_thread(ctx);
7042 cred = vfs_context_ucred(ctx);
7043 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7044 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7045 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7046 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7047
7048 if (!create) {
7049 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7050 if (error)
7051 return (error);
7052 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7053 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7054 return (ENOATTR);
7055 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7056 /* shouldn't happen... but just be safe */
7057 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7058 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7059 }
7060 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7061 if (open) {
7062 /*
7063 * We're trying to open the file.
7064 * We'll create/open it with the given access mode,
7065 * and set NFS_OPEN_FILE_CREATE.
7066 */
7067 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7068 if (prefetch && guarded)
7069 prefetch = 0; /* no sense prefetching data that can't be there */
7070
7071 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7072 if (!noop)
7073 return (ENOMEM);
7074 }
7075
7076 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7077 return (error);
7078
7079 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7080 hadattrdir = (adnp != NULL);
7081 if (prefetch) {
7082 microuptime(&now);
7083 /* use the special state ID because we don't have a real one to send */
7084 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7085 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7086 }
7087 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7088 nfsm_chain_null(&nmreq);
7089 nfsm_chain_null(&nmrep);
7090
7091 if (hadattrdir) {
7092 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx))))
7093 goto nfsmout;
7094 /* nfs_getattr() will check changed and purge caches */
7095 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7096 nfsmout_if(error);
7097 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7098 switch (error) {
7099 case ENOENT:
7100 /* negative cache entry */
7101 goto nfsmout;
7102 case 0:
7103 /* cache miss */
7104 /* try dir buf cache lookup */
7105 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7106 if (!error && anp) {
7107 /* dir buf cache hit */
7108 *anpp = anp;
7109 error = -1;
7110 }
7111 if (error != -1) /* cache miss */
7112 break;
7113 /* FALLTHROUGH */
7114 case -1:
7115 /* cache hit, not really an error */
7116 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7117 if (!anp && avp)
7118 *anpp = anp = VTONFS(avp);
7119
7120 nfs_node_clear_busy(adnp);
7121 adbusyerror = ENOENT;
7122
7123 /* check for directory access */
7124 naa.a_desc = &vnop_access_desc;
7125 naa.a_vp = NFSTOV(adnp);
7126 naa.a_action = KAUTH_VNODE_SEARCH;
7127 naa.a_context = ctx;
7128
7129 /* compute actual success/failure based on accessibility */
7130 error = nfs_vnop_access(&naa);
7131 /* FALLTHROUGH */
7132 default:
7133 /* we either found it, or hit an error */
7134 if (!error && guarded) {
7135 /* found cached entry but told not to use it */
7136 error = EEXIST;
7137 vnode_put(NFSTOV(anp));
7138 *anpp = anp = NULL;
7139 }
7140 /* we're done if error or we don't need to open */
7141 if (error || !open)
7142 goto nfsmout;
7143 /* no error and we need to open... */
7144 }
7145 }
7146
7147 if (open) {
7148 restart:
7149 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7150 if (error) {
7151 nfs_open_owner_rele(noop);
7152 noop = NULL;
7153 goto nfsmout;
7154 }
7155 inuse = 1;
7156
7157 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7158 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7159 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7160 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7161 error = EIO;
7162 }
7163 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7164 nfs_mount_state_in_use_end(nmp, 0);
7165 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7166 nfs_open_file_destroy(newnofp);
7167 newnofp = NULL;
7168 if (!error)
7169 goto restart;
7170 }
7171 if (!error)
7172 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7173 if (error) {
7174 if (newnofp)
7175 nfs_open_file_destroy(newnofp);
7176 newnofp = NULL;
7177 goto nfsmout;
7178 }
7179 if (anp) {
7180 /*
7181 * We already have the node. So we just need to open
7182 * it - which we may be able to do with a delegation.
7183 */
7184 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7185 if (!error) {
7186 /* open succeeded, so our open file is no longer temporary */
7187 nofp = newnofp;
7188 nofpbusyerror = 0;
7189 newnofp = NULL;
7190 if (nofpp)
7191 *nofpp = nofp;
7192 }
7193 goto nfsmout;
7194 }
7195 }
7196
7197 /*
7198 * We either don't have the attrdir or we didn't find the attribute
7199 * in the name cache, so we need to talk to the server.
7200 *
7201 * If we don't have the attrdir, we'll need to ask the server for that too.
7202 * If the caller is requesting that the attribute be created, we need to
7203 * make sure the attrdir is created.
7204 * The caller may also request that the first block of an existing attribute
7205 * be retrieved at the same time.
7206 */
7207
7208 if (open) {
7209 /* need to mark the open owner busy during the RPC */
7210 if ((error = nfs_open_owner_set_busy(noop, thd)))
7211 goto nfsmout;
7212 noopbusy = 1;
7213 }
7214
7215 /*
7216 * We'd like to get updated post-open/lookup attributes for the
7217 * directory and we may also want to prefetch some data via READ.
7218 * We'd like the READ results to be last so that we can leave the
7219 * data in the mbufs until the end.
7220 *
7221 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7222 */
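/*
 * In the largest case (no cached attrdir, opening, and prefetching) the
 * COMPOUND built below is, in order:
 *
 *	PUTFH, OPENATTR(create), GETATTR(+FILEHANDLE),
 *	OPEN, GETATTR(+FILEHANDLE), SAVEFH,
 *	PUTFH, OPENATTR, GETATTR,
 *	RESTOREFH, NVERIFY(size == 0), READ
 *
 * The NVERIFY compares against size == 0 and, per NVERIFY semantics,
 * fails when the sizes match; that stops the compound before the READ
 * exactly when the attribute is empty and there is nothing to prefetch.
 */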
7223 numops = 5;
7224 if (!hadattrdir)
7225 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7226 if (prefetch)
7227 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7228 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7229 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7230 if (hadattrdir) {
7231 numops--;
7232 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7233 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7234 } else {
7235 numops--;
7236 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7237 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7238 numops--;
7239 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7240 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7241 numops--;
7242 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7243 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7244 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7245 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7246 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7247 }
7248 if (open) {
7249 numops--;
7250 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7251 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7252 nfsm_chain_add_32(error, &nmreq, accessMode);
7253 nfsm_chain_add_32(error, &nmreq, denyMode);
7254 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7255 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7256 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7257 nfsm_chain_add_32(error, &nmreq, create);
7258 if (create) {
7259 nfsm_chain_add_32(error, &nmreq, guarded);
7260 VATTR_INIT(&vattr);
7261 if (truncate)
7262 VATTR_SET(&vattr, va_data_size, 0);
7263 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7264 }
7265 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7266 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7267 } else {
7268 numops--;
7269 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7270 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7271 }
7272 numops--;
7273 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7274 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7275 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7276 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7277 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7278 if (prefetch) {
7279 numops--;
7280 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7281 }
7282 if (hadattrdir) {
7283 numops--;
7284 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7285 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7286 } else {
7287 numops--;
7288 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7289 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7290 numops--;
7291 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7292 nfsm_chain_add_32(error, &nmreq, 0);
7293 }
7294 numops--;
7295 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7296 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7297 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7298 if (prefetch) {
7299 numops--;
7300 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7301 numops--;
7302 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7303 VATTR_INIT(&vattr);
7304 VATTR_SET(&vattr, va_data_size, 0);
7305 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7306 numops--;
7307 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7308 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7309 nfsm_chain_add_64(error, &nmreq, 0);
7310 nfsm_chain_add_32(error, &nmreq, rlen);
7311 }
7312 nfsm_chain_build_done(error, &nmreq);
7313 nfsm_assert(error, (numops == 0), EPROTO);
7314 nfsmout_if(error);
7315 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7316 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7317 if (!error)
7318 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7319
7320 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp))))
7321 error = adlockerror;
7322 savedxid = xid;
7323 nfsm_chain_skip_tag(error, &nmrep);
7324 nfsm_chain_get_32(error, &nmrep, numops);
7325 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7326 if (!hadattrdir) {
7327 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7328 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7329 nfsmout_if(error);
7330 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7331 nfsmout_if(error);
7332 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7333 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7334 /* (re)allocate attrdir fh buffer */
7335 if (np->n_attrdirfh)
7336 FREE(np->n_attrdirfh, M_TEMP);
7337 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
7338 }
7339 if (np->n_attrdirfh) {
7340 /* remember the attrdir fh in the node */
7341 *np->n_attrdirfh = fh.fh_len;
7342 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
7343 /* create busied node for attrdir */
7344 struct componentname cn;
7345 bzero(&cn, sizeof(cn));
7346 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7347 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7348 cn.cn_nameiop = LOOKUP;
7349 // XXX can't set parent correctly (to np) yet
7350 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7351 if (!error) {
7352 adlockerror = 0;
7353 /* set the node busy */
7354 SET(adnp->n_flag, NBUSY);
7355 adbusyerror = 0;
7356 }
7357 /* if no adnp, oh well... */
7358 error = 0;
7359 }
7360 }
7361 NVATTR_CLEANUP(&nvattr);
7362 fh.fh_len = 0;
7363 }
7364 if (open) {
7365 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7366 nfs_owner_seqid_increment(noop, NULL, error);
7367 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7368 nfsm_chain_check_change_info(error, &nmrep, adnp);
7369 nfsm_chain_get_32(error, &nmrep, rflags);
7370 bmlen = NFS_ATTR_BITMAP_LEN;
7371 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7372 nfsm_chain_get_32(error, &nmrep, delegation);
7373 if (!error)
7374 switch (delegation) {
7375 case NFS_OPEN_DELEGATE_NONE:
7376 break;
7377 case NFS_OPEN_DELEGATE_READ:
7378 case NFS_OPEN_DELEGATE_WRITE:
7379 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7380 nfsm_chain_get_32(error, &nmrep, recall);
7381 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
7382 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7383 /* if we have any trouble accepting the ACE, just invalidate it */
7384 ace_type = ace_flags = ace_mask = len = 0;
7385 nfsm_chain_get_32(error, &nmrep, ace_type);
7386 nfsm_chain_get_32(error, &nmrep, ace_flags);
7387 nfsm_chain_get_32(error, &nmrep, ace_mask);
7388 nfsm_chain_get_32(error, &nmrep, len);
7389 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7390 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7391 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7392 if (!error && (len >= slen)) {
7393 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
7394 if (s)
7395 slen = len+1;
7396 else
7397 ace.ace_flags = 0;
7398 }
7399 if (s)
7400 nfsm_chain_get_opaque(error, &nmrep, len, s);
7401 else
7402 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7403 if (!error && s) {
7404 s[len] = '\0';
7405 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
7406 ace.ace_flags = 0;
7407 }
7408 if (error || !s)
7409 ace.ace_flags = 0;
7410 if (s && (s != sbuf))
7411 FREE(s, M_TEMP);
7412 break;
7413 default:
7414 error = EBADRPC;
7415 break;
7416 }
7417 /* At this point if we have no error, the object was created/opened. */
7418 open_error = error;
7419 } else {
7420 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7421 }
7422 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7423 nfsmout_if(error);
7424 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7425 nfsmout_if(error);
7426 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7427 error = EIO;
7428 goto nfsmout;
7429 }
7430 if (prefetch)
7431 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7432 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7433 if (!hadattrdir)
7434 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7435 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7436 nfsmout_if(error);
7437 xid = savedxid;
7438 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
7439 nfsmout_if(error);
7440
7441 if (open) {
7442 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
7443 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7444 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
7445 if (adnp) {
7446 nfs_node_unlock(adnp);
7447 adlockerror = ENOENT;
7448 }
7449 NVATTR_CLEANUP(&nvattr);
7450 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
7451 nfsmout_if(error);
7452 savedxid = xid;
7453 if ((adlockerror = nfs_node_lock(adnp)))
7454 error = adlockerror;
7455 }
7456 }
7457
7458 nfsmout:
7459 if (open && adnp && !adlockerror) {
7460 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
7461 adnp->n_flag &= ~NNEGNCENTRIES;
7462 cache_purge_negatives(NFSTOV(adnp));
7463 }
7464 adnp->n_flag |= NMODIFIED;
7465 nfs_node_unlock(adnp);
7466 adlockerror = ENOENT;
7467 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7468 }
7469 if (adnp && !adlockerror && (error == ENOENT) &&
7470 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
7471 /* add a negative entry in the name cache */
7472 cache_enter(NFSTOV(adnp), NULL, cnp);
7473 adnp->n_flag |= NNEGNCENTRIES;
7474 }
7475 if (adnp && !adlockerror) {
7476 nfs_node_unlock(adnp);
7477 adlockerror = ENOENT;
7478 }
7479 if (!error && !anp && fh.fh_len) {
7480 /* create the vnode with the filehandle and attributes */
7481 xid = savedxid;
7482 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
7483 if (!error) {
7484 *anpp = anp;
7485 nfs_node_unlock(anp);
7486 }
7487 if (!error && open) {
7488 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
7489 /* After we have a node, add our open file struct to the node */
7490 nofp = newnofp;
7491 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
7492 if (error) {
7493 /* This shouldn't happen, because we passed in a new nofp to use. */
7494 printf("nfs_open_file_find_internal failed! %d\n", error);
7495 nofp = NULL;
7496 } else if (nofp != newnofp) {
7497 /*
7498 * Hmm... an open file struct already exists.
7499 * Mark the existing one busy and merge our open into it.
7500 * Then destroy the one we created.
7501 * Note: there's no chance of an open conflict because the
7502 * open has already been granted.
7503 */
7504 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
7505 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
7506 nofp->nof_stateid = newnofp->nof_stateid;
7507 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
7508 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7509 nfs_open_file_clear_busy(newnofp);
7510 nfs_open_file_destroy(newnofp);
7511 newnofp = NULL;
7512 }
7513 if (!error) {
7514 newnofp = NULL;
7515 nofpbusyerror = 0;
7516 /* mark the node as holding a create-initiated open */
7517 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
7518 nofp->nof_creator = current_thread();
7519 if (nofpp)
7520 *nofpp = nofp;
7521 }
7522 }
7523 }
7524 NVATTR_CLEANUP(&nvattr);
7525 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
7526 if (!error && anp && !recall) {
7527 /* stuff the delegation state in the node */
7528 lck_mtx_lock(&anp->n_openlock);
7529 anp->n_openflags &= ~N_DELEG_MASK;
7530 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7531 anp->n_dstateid = dstateid;
7532 anp->n_dace = ace;
7533 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7534 lck_mtx_lock(&nmp->nm_lock);
7535 if (anp->n_dlink.tqe_next == NFSNOLIST)
7536 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7537 lck_mtx_unlock(&nmp->nm_lock);
7538 }
7539 lck_mtx_unlock(&anp->n_openlock);
7540 } else {
7541 /* give the delegation back */
7542 if (anp) {
7543 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
7544 /* update delegation state and return it */
7545 lck_mtx_lock(&anp->n_openlock);
7546 anp->n_openflags &= ~N_DELEG_MASK;
7547 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7548 anp->n_dstateid = dstateid;
7549 anp->n_dace = ace;
7550 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7551 lck_mtx_lock(&nmp->nm_lock);
7552 if (anp->n_dlink.tqe_next == NFSNOLIST)
7553 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7554 lck_mtx_unlock(&nmp->nm_lock);
7555 }
7556 lck_mtx_unlock(&anp->n_openlock);
7557 /* don't need to send a separate delegreturn for fh */
7558 fh.fh_len = 0;
7559 }
7560 /* return anp's current delegation */
7561 nfs4_delegation_return(anp, 0, thd, cred);
7562 }
7563 if (fh.fh_len) /* return fh's delegation if it wasn't for anp */
7564 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
7565 }
7566 }
7567 if (open) {
7568 if (newnofp) {
7569 /* need to cleanup our temporary nofp */
7570 nfs_open_file_clear_busy(newnofp);
7571 nfs_open_file_destroy(newnofp);
7572 newnofp = NULL;
7573 } else if (nofp && !nofpbusyerror) {
7574 nfs_open_file_clear_busy(nofp);
7575 nofpbusyerror = ENOENT;
7576 }
7577 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
7578 inuse = 0;
7579 nofp = newnofp = NULL;
7580 rflags = delegation = recall = eof = rlen = retlen = 0;
7581 ace.ace_flags = 0;
7582 s = sbuf;
7583 slen = sizeof(sbuf);
7584 nfsm_chain_cleanup(&nmreq);
7585 nfsm_chain_cleanup(&nmrep);
7586 if (anp) {
7587 vnode_put(NFSTOV(anp));
7588 *anpp = anp = NULL;
7589 }
7590 hadattrdir = (adnp != NULL);
7591 if (noopbusy) {
7592 nfs_open_owner_clear_busy(noop);
7593 noopbusy = 0;
7594 }
7595 goto restart;
7596 }
7597 if (noop) {
7598 if (noopbusy) {
7599 nfs_open_owner_clear_busy(noop);
7600 noopbusy = 0;
7601 }
7602 nfs_open_owner_rele(noop);
7603 }
7604 }
7605 if (!error && prefetch && nmrep.nmc_mhead) {
7606 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7607 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
7608 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
7609 nfsm_chain_get_32(error, &nmrep, eof);
7610 nfsm_chain_get_32(error, &nmrep, retlen);
7611 if (!error && anp) {
7612 /*
7613 * There is one potential problem with doing the prefetch:
7614 * because we don't have the node before we start the RPC, we
7615 * can't have the buffer busy while the READ is performed.
7616 * So there is a chance that other I/O occurred on the same
7617 * range of data while we were performing this RPC. If that
7618 * happens, then it's possible the data we have in the READ
7619 * response is no longer up to date.
7620 * Once we have the node and the buffer, we need to make sure
7621 * that there's no chance we could be putting stale data in
7622 * the buffer.
7623 * So, we check if the range read is dirty or if any I/O may
7624 * have occurred on it while we were performing our RPC.
7625 */
7626 struct nfsbuf *bp = NULL;
7627 int lastpg;
7628 uint32_t pagemask;
7629
7630 retlen = MIN(retlen, rlen);
7631
7632 /* check if node needs size update or invalidation */
7633 if (ISSET(anp->n_flag, NUPDATESIZE))
7634 nfs_data_update_size(anp, 0);
7635 if (!(error = nfs_node_lock(anp))) {
7636 if (anp->n_flag & NNEEDINVALIDATE) {
7637 anp->n_flag &= ~NNEEDINVALIDATE;
7638 nfs_node_unlock(anp);
7639 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
7640 if (!error) /* let's play it safe and just drop the data */
7641 error = EIO;
7642 } else {
7643 nfs_node_unlock(anp);
7644 }
7645 }
7646
7647 /* calculate page mask for the range of data read */
7648 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
7649 pagemask = ((1 << (lastpg + 1)) - 1);
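/*
 * e.g. (illustrative) with PAGE_SIZE 4096 and retlen 8192:
 * lastpg = (trunc_page_32(8192) - 1) / 4096 = 8191 / 4096 = 1, and
 * pagemask = (1 << 2) - 1 = 0x3, covering pages 0 and 1 of the buffer.
 */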
7650
7651 if (!error)
7652 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
7653 /* don't save the data if dirty or potential I/O conflict */
7654 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
7655 timevalcmp(&anp->n_lastio, &now, <)) {
7656 OSAddAtomic64(1, &nfsstats.read_bios);
7657 CLR(bp->nb_flags, (NB_DONE|NB_ASYNC));
7658 SET(bp->nb_flags, NB_READ);
7659 NFS_BUF_MAP(bp);
7660 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
7661 if (error) {
7662 bp->nb_error = error;
7663 SET(bp->nb_flags, NB_ERROR);
7664 } else {
7665 bp->nb_offio = 0;
7666 bp->nb_endio = rlen;
7667 if ((retlen > 0) && (bp->nb_endio < (int)retlen))
7668 bp->nb_endio = retlen;
7669 if (eof || (retlen == 0)) {
7670 /* zero out the remaining data (up to EOF) */
7671 off_t rpcrem, eofrem, rem;
7672 rpcrem = (rlen - retlen);
7673 eofrem = anp->n_size - (NBOFF(bp) + retlen);
7674 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
7675 if (rem > 0)
7676 bzero(bp->nb_data + retlen, rem);
7677 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
7678 /* ugh... short read ... just invalidate for now... */
7679 SET(bp->nb_flags, NB_INVAL);
7680 }
7681 }
7682 nfs_buf_read_finish(bp);
7683 microuptime(&anp->n_lastio);
7684 }
7685 if (bp)
7686 nfs_buf_release(bp, 1);
7687 }
7688 error = 0; /* ignore any transient error in processing the prefetch */
7689 }
7690 if (adnp && !adbusyerror) {
7691 nfs_node_clear_busy(adnp);
7692 adbusyerror = ENOENT;
7693 }
7694 if (!busyerror) {
7695 nfs_node_clear_busy(np);
7696 busyerror = ENOENT;
7697 }
7698 if (adnp)
7699 vnode_put(NFSTOV(adnp));
7700 if (error && *anpp) {
7701 vnode_put(NFSTOV(*anpp));
7702 *anpp = NULL;
7703 }
7704 nfsm_chain_cleanup(&nmreq);
7705 nfsm_chain_cleanup(&nmrep);
7706 return (error);
7707 }
7708
7709 /*
7710 * Remove a named attribute.
7711 */
7712 int
7713 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
7714 {
7715 nfsnode_t adnp = NULL;
7716 struct nfsmount *nmp;
7717 struct componentname cn;
7718 struct vnop_remove_args vra;
7719 int error, putanp = 0;
7720
7721 nmp = NFSTONMP(np);
7722 if (nfs_mount_gone(nmp))
7723 return (ENXIO);
7724
7725 bzero(&cn, sizeof(cn));
7726 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7727 cn.cn_namelen = strlen(name);
7728 cn.cn_nameiop = DELETE;
7729 cn.cn_flags = 0;
7730
7731 if (!anp) {
7732 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7733 0, ctx, &anp, NULL);
7734 if ((!error && !anp) || (error == ENOATTR))
7735 error = ENOENT;
7736 if (error) {
7737 if (anp) {
7738 vnode_put(NFSTOV(anp));
7739 anp = NULL;
7740 }
7741 goto out;
7742 }
7743 putanp = 1;
7744 }
7745
7746 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7747 goto out;
7748 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
7749 nfs_node_clear_busy(np);
7750 if (!adnp) {
7751 error = ENOENT;
7752 goto out;
7753 }
7754
7755 vra.a_desc = &vnop_remove_desc;
7756 vra.a_dvp = NFSTOV(adnp);
7757 vra.a_vp = NFSTOV(anp);
7758 vra.a_cnp = &cn;
7759 vra.a_flags = 0;
7760 vra.a_context = ctx;
7761 error = nfs_vnop_remove(&vra);
7762 out:
7763 if (adnp)
7764 vnode_put(NFSTOV(adnp));
7765 if (putanp)
7766 vnode_put(NFSTOV(anp));
7767 return (error);
7768 }
7769
7770 int
7771 nfs4_vnop_getxattr(
7772 struct vnop_getxattr_args /* {
7773 struct vnodeop_desc *a_desc;
7774 vnode_t a_vp;
7775 const char * a_name;
7776 uio_t a_uio;
7777 size_t *a_size;
7778 int a_options;
7779 vfs_context_t a_context;
7780 } */ *ap)
7781 {
7782 vfs_context_t ctx = ap->a_context;
7783 struct nfsmount *nmp;
7784 struct nfs_vattr nvattr;
7785 struct componentname cn;
7786 nfsnode_t anp;
7787 int error = 0, isrsrcfork;
7788
7789 nmp = VTONMP(ap->a_vp);
7790 if (nfs_mount_gone(nmp))
7791 return (ENXIO);
7792
7793 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7794 return (ENOTSUP);
7795 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
7796 if (error)
7797 return (error);
7798 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7799 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7800 return (ENOATTR);
7801
7802 bzero(&cn, sizeof(cn));
7803 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
7804 cn.cn_namelen = strlen(ap->a_name);
7805 cn.cn_nameiop = LOOKUP;
7806 cn.cn_flags = MAKEENTRY;
7807
7808 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7809 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7810
7811 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7812 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
7813 if ((!error && !anp) || (error == ENOENT))
7814 error = ENOATTR;
7815 if (!error) {
7816 if (ap->a_uio)
7817 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
7818 else
7819 *ap->a_size = anp->n_size;
7820 }
7821 if (anp)
7822 vnode_put(NFSTOV(anp));
7823 return (error);
7824 }
7825
7826 int
7827 nfs4_vnop_setxattr(
7828 struct vnop_setxattr_args /* {
7829 struct vnodeop_desc *a_desc;
7830 vnode_t a_vp;
7831 const char * a_name;
7832 uio_t a_uio;
7833 int a_options;
7834 vfs_context_t a_context;
7835 } */ *ap)
7836 {
7837 vfs_context_t ctx = ap->a_context;
7838 int options = ap->a_options;
7839 uio_t uio = ap->a_uio;
7840 const char *name = ap->a_name;
7841 struct nfsmount *nmp;
7842 struct componentname cn;
7843 nfsnode_t anp = NULL;
7844 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
7845 #define FINDERINFOSIZE 32
7846 uint8_t finfo[FINDERINFOSIZE];
7847 uint32_t *finfop;
7848 struct nfs_open_file *nofp = NULL;
7849 char uio_buf [ UIO_SIZEOF(1) ];
7850 uio_t auio;
7851 struct vnop_write_args vwa;
7852
7853 nmp = VTONMP(ap->a_vp);
7854 if (nfs_mount_gone(nmp))
7855 return (ENXIO);
7856
7857 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7858 return (ENOTSUP);
7859
7860 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE))
7861 return (EINVAL);
7862
7863 /* XXX limitation based on need to back up uio on short write */
7864 if (uio_iovcnt(uio) > 1) {
7865 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7866 return (EINVAL);
7867 }
7868
7869 bzero(&cn, sizeof(cn));
7870 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7871 cn.cn_namelen = strlen(name);
7872 cn.cn_nameiop = CREATE;
7873 cn.cn_flags = MAKEENTRY;
7874
7875 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
7876 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7877 if (!isrsrcfork)
7878 uio_setoffset(uio, 0);
7879 if (isfinderinfo) {
7880 if (uio_resid(uio) != sizeof(finfo))
7881 return (ERANGE);
7882 error = uiomove((char*)&finfo, sizeof(finfo), uio);
7883 if (error)
7884 return (error);
7885 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7886 empty = 1;
7887 for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++)
7888 if (finfop[i]) {
7889 empty = 0;
7890 break;
7891 }
7892 if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) {
7893 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7894 if (error == ENOENT)
7895 error = 0;
7896 return (error);
7897 }
7898 /* first, let's see if we get a create/replace error */
7899 }
7900
7901 /*
7902 * create/open the xattr
7903 *
7904 * We need to make sure not to create it if XATTR_REPLACE.
7905 * For all xattrs except the resource fork, we also want to
7906 * truncate the xattr to remove any current data. We'll do
7907 * that by setting the size to 0 on create/open.
7908 */
7909 flags = 0;
7910 if (!(options & XATTR_REPLACE))
7911 flags |= NFS_GET_NAMED_ATTR_CREATE;
7912 if (options & XATTR_CREATE)
7913 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
7914 if (!isrsrcfork)
7915 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
7916
7917 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
7918 flags, ctx, &anp, &nofp);
7919 if (!error && !anp)
7920 error = ENOATTR;
7921 if (error)
7922 goto out;
7923 /* grab the open state from the get/create/open */
7924 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
7925 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
7926 nofp->nof_creator = NULL;
7927 nfs_open_file_clear_busy(nofp);
7928 }
7929
7930 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7931 if (isfinderinfo && empty)
7932 goto doclose;
7933
7934 /*
7935 * Write the data out and flush.
7936 *
7937 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7938 */
7939 vwa.a_desc = &vnop_write_desc;
7940 vwa.a_vp = NFSTOV(anp);
7941 vwa.a_uio = NULL;
7942 vwa.a_ioflag = 0;
7943 vwa.a_context = ctx;
7944 if (isfinderinfo) {
7945 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
7946 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
7947 vwa.a_uio = auio;
7948 } else if (uio_resid(uio) > 0) {
7949 vwa.a_uio = uio;
7950 }
7951 if (vwa.a_uio) {
7952 error = nfs_vnop_write(&vwa);
7953 if (!error)
7954 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
7955 }
7956 doclose:
7957 /* Close the xattr. */
7958 if (nofp) {
7959 int busyerror = nfs_open_file_set_busy(nofp, NULL);
7960 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
7961 if (!busyerror)
7962 nfs_open_file_clear_busy(nofp);
7963 }
7964 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
7965 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7966 if (error == ENOENT)
7967 error = 0;
7968 }
7969 if (!error)
7970 error = closeerror;
7971 out:
7972 if (anp)
7973 vnode_put(NFSTOV(anp));
7974 if (error == ENOENT)
7975 error = ENOATTR;
7976 return (error);
7977 }
7978
7979 int
7980 nfs4_vnop_removexattr(
7981 struct vnop_removexattr_args /* {
7982 struct vnodeop_desc *a_desc;
7983 vnode_t a_vp;
7984 const char * a_name;
7985 int a_options;
7986 vfs_context_t a_context;
7987 } */ *ap)
7988 {
7989 struct nfsmount *nmp = VTONMP(ap->a_vp);
7990 int error;
7991
7992 if (nfs_mount_gone(nmp))
7993 return (ENXIO);
7994 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7995 return (ENOTSUP);
7996
7997 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
7998 if (error == ENOENT)
7999 error = ENOATTR;
8000 return (error);
8001 }
8002
8003 int
8004 nfs4_vnop_listxattr(
8005 struct vnop_listxattr_args /* {
8006 struct vnodeop_desc *a_desc;
8007 vnode_t a_vp;
8008 uio_t a_uio;
8009 size_t *a_size;
8010 int a_options;
8011 vfs_context_t a_context;
8012 } */ *ap)
8013 {
8014 vfs_context_t ctx = ap->a_context;
8015 nfsnode_t np = VTONFS(ap->a_vp);
8016 uio_t uio = ap->a_uio;
8017 nfsnode_t adnp = NULL;
8018 struct nfsmount *nmp;
8019 int error, done, i;
8020 struct nfs_vattr nvattr;
8021 uint64_t cookie, nextcookie, lbn = 0;
8022 struct nfsbuf *bp = NULL;
8023 struct nfs_dir_buf_header *ndbhp;
8024 struct direntry *dp;
8025
8026 nmp = VTONMP(ap->a_vp);
8027 if (nfs_mount_gone(nmp))
8028 return (ENXIO);
8029
8030 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8031 return (ENOTSUP);
8032
8033 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8034 if (error)
8035 return (error);
8036 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8037 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8038 return (0);
8039
8040 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
8041 return (error);
8042 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8043 nfs_node_clear_busy(np);
8044 if (!adnp)
8045 goto out;
8046
8047 if ((error = nfs_node_lock(adnp)))
8048 goto out;
8049
8050 if (adnp->n_flag & NNEEDINVALIDATE) {
8051 adnp->n_flag &= ~NNEEDINVALIDATE;
8052 nfs_invaldir(adnp);
8053 nfs_node_unlock(adnp);
8054 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8055 if (!error)
8056 error = nfs_node_lock(adnp);
8057 if (error)
8058 goto out;
8059 }
8060
8061 /*
8062 * check for need to invalidate when (re)starting at beginning
8063 */
8064 if (adnp->n_flag & NMODIFIED) {
8065 nfs_invaldir(adnp);
8066 nfs_node_unlock(adnp);
8067 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1)))
8068 goto out;
8069 } else {
8070 nfs_node_unlock(adnp);
8071 }
8072 /* nfs_getattr() will check changed and purge caches */
8073 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED)))
8074 goto out;
8075
8076 if (uio && (uio_resid(uio) == 0))
8077 goto out;
8078
8079 done = 0;
8080 nextcookie = lbn = 0;
8081
8082 while (!error && !done) {
8083 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8084 cookie = nextcookie;
8085 getbuffer:
8086 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8087 if (error)
8088 goto out;
8089 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8090 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8091 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8092 ndbhp->ndbh_flags = 0;
8093 ndbhp->ndbh_count = 0;
8094 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8095 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8096 }
8097 error = nfs_buf_readdir(bp, ctx);
8098 if (error == NFSERR_DIRBUFDROPPED)
8099 goto getbuffer;
8100 if (error)
8101 nfs_buf_release(bp, 1);
8102 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8103 if (!nfs_node_lock(adnp)) {
8104 nfs_invaldir(adnp);
8105 nfs_node_unlock(adnp);
8106 }
8107 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8108 if (error == NFSERR_BAD_COOKIE)
8109 error = ENOENT;
8110 }
8111 if (error)
8112 goto out;
8113 }
8114
8115 /* go through all the entries copying/counting */
8116 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
8117 for (i=0; i < ndbhp->ndbh_count; i++) {
8118 if (!xattr_protected(dp->d_name)) {
8119 if (uio == NULL) {
8120 *ap->a_size += dp->d_namlen + 1;
8121 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8122 error = ERANGE;
8123 } else {
8124 error = uiomove(dp->d_name, dp->d_namlen+1, uio);
8125 if (error && (error != EFAULT))
8126 error = ERANGE;
8127 }
8128 }
8129 nextcookie = dp->d_seekoff;
8130 dp = NFS_DIRENTRY_NEXT(dp);
8131 }
8132
8133 if (i == ndbhp->ndbh_count) {
8134 /* hit end of buffer, move to next buffer */
8135 lbn = nextcookie;
8136 /* if we also hit EOF, we're done */
8137 if (ISSET(ndbhp->ndbh_flags, NDB_EOF))
8138 done = 1;
8139 }
8140 if (!error && !done && (nextcookie == cookie)) {
8141 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8142 error = EIO;
8143 }
8144 nfs_buf_release(bp, 1);
8145 }
8146 out:
8147 if (adnp)
8148 vnode_put(NFSTOV(adnp));
8149 return (error);
8150 }
8151
8152 #if NAMEDSTREAMS
8153 int
8154 nfs4_vnop_getnamedstream(
8155 struct vnop_getnamedstream_args /* {
8156 struct vnodeop_desc *a_desc;
8157 vnode_t a_vp;
8158 vnode_t *a_svpp;
8159 const char *a_name;
8160 enum nsoperation a_operation;
8161 int a_flags;
8162 vfs_context_t a_context;
8163 } */ *ap)
8164 {
8165 vfs_context_t ctx = ap->a_context;
8166 struct nfsmount *nmp;
8167 struct nfs_vattr nvattr;
8168 struct componentname cn;
8169 nfsnode_t anp;
8170 int error = 0;
8171
8172 nmp = VTONMP(ap->a_vp);
8173 if (nfs_mount_gone(nmp))
8174 return (ENXIO);
8175
8176 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8177 return (ENOTSUP);
8178 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8179 if (error)
8180 return (error);
8181 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8182 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8183 return (ENOATTR);
8184
8185 bzero(&cn, sizeof(cn));
8186 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8187 cn.cn_namelen = strlen(ap->a_name);
8188 cn.cn_nameiop = LOOKUP;
8189 cn.cn_flags = MAKEENTRY;
8190
8191 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8192 0, ctx, &anp, NULL);
8193 if ((!error && !anp) || (error == ENOENT))
8194 error = ENOATTR;
8195 if (!error && anp)
8196 *ap->a_svpp = NFSTOV(anp);
8197 else if (anp)
8198 vnode_put(NFSTOV(anp));
8199 return (error);
8200 }
8201
8202 int
8203 nfs4_vnop_makenamedstream(
8204 struct vnop_makenamedstream_args /* {
8205 struct vnodeop_desc *a_desc;
8206 vnode_t *a_svpp;
8207 vnode_t a_vp;
8208 const char *a_name;
8209 int a_flags;
8210 vfs_context_t a_context;
8211 } */ *ap)
8212 {
8213 vfs_context_t ctx = ap->a_context;
8214 struct nfsmount *nmp;
8215 struct componentname cn;
8216 nfsnode_t anp;
8217 int error = 0;
8218
8219 nmp = VTONMP(ap->a_vp);
8220 if (nfs_mount_gone(nmp))
8221 return (ENXIO);
8222
8223 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8224 return (ENOTSUP);
8225
8226 bzero(&cn, sizeof(cn));
8227 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8228 cn.cn_namelen = strlen(ap->a_name);
8229 cn.cn_nameiop = CREATE;
8230 cn.cn_flags = MAKEENTRY;
8231
8232 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8233 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8234 if ((!error && !anp) || (error == ENOENT))
8235 error = ENOATTR;
8236 if (!error && anp)
8237 *ap->a_svpp = NFSTOV(anp);
8238 else if (anp)
8239 vnode_put(NFSTOV(anp));
8240 return (error);
8241 }
8242
8243 int
8244 nfs4_vnop_removenamedstream(
8245 struct vnop_removenamedstream_args /* {
8246 struct vnodeop_desc *a_desc;
8247 vnode_t a_vp;
8248 vnode_t a_svp;
8249 const char *a_name;
8250 int a_flags;
8251 vfs_context_t a_context;
8252 } */ *ap)
8253 {
8254 struct nfsmount *nmp = VTONMP(ap->a_vp);
8255 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8256 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8257
8258 if (nfs_mount_gone(nmp))
8259 return (ENXIO);
8260
8261 /*
8262 * Given that a_svp is a named stream, checking for
8263 * named attribute support is kinda pointless.
8264 */
8265 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8266 return (ENOTSUP);
8267
8268 return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context));
8269 }
8270
8271 #endif