/* apple/xnu (xnu-4903.241.1): bsd/nfs/nfs4_vnops.c */
/*
 * Copyright (c) 2006-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

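/*
 * A note on the request-building pattern used throughout this file: the
 * nfsm_chain_* macros thread a single 'error' variable through every call,
 * so once an error is set the remaining calls become no-ops and the whole
 * COMPOUND can be built and then checked once. 'numops' counts down as each
 * operation is added, and nfsm_assert() verifies it reached zero. A rough
 * sketch of the shape (illustrative only, not an actual RPC from this file):
 *
 *  numops = 2;                             // PUTFH, GETATTR
 *  nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
 *  nfsm_chain_add_compound_header(error, &nmreq, "tag", minor_vers, numops);
 *  numops--;
 *  nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
 *  nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
 *  numops--;
 *  nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
 *  nfsm_chain_build_done(error, &nmreq);
 *  nfsm_assert(error, (numops == 0), EPROTO);
 */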
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
    int error = 0, lockerror = ENOENT, status, numops, slot;
    u_int64_t xid;
    struct nfsm_chain nmreq, nmrep;
    struct timeval now;
    uint32_t access_result = 0, supported = 0, missing;
    struct nfsmount *nmp = NFSTONMP(np);
    int nfsvers = nmp->nm_vers;
    uid_t uid;
    struct nfsreq_secinfo_args si;

    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (0);

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH, ACCESS, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
    nfsm_chain_add_32(error, &nmreq, *access);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
        vfs_context_thread(ctx), vfs_context_ucred(ctx),
        &si, rpcflags, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
    nfsm_chain_get_32(error, &nmrep, supported);
    nfsm_chain_get_32(error, &nmrep, access_result);
    nfsmout_if(error);
    if ((missing = (*access & ~supported))) {
        /* missing support for something(s) we wanted */
        if (missing & NFS_ACCESS_DELETE) {
            /*
             * If the server doesn't report DELETE (possible
             * on UNIX systems), we'll assume that it is OK
             * and just let any subsequent delete action fail
             * if it really isn't deletable.
             */
            access_result |= NFS_ACCESS_DELETE;
        }
    }
    /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
    if (nfs_access_dotzfs) {
        vnode_t dvp = NULLVP;
        if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */
            access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE);
        else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD))
            access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
        if (dvp != NULLVP)
            vnode_put(dvp);
    }
    /* Some servers report DELETE support but erroneously give a denied answer. */
    if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE))
        access_result |= NFS_ACCESS_DELETE;
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
    nfsmout_if(error);

    if (nfs_mount_gone(nmp)) {
        error = ENXIO;
    }
    nfsmout_if(error);

    if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
        uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
    } else {
        uid = kauth_cred_getuid(vfs_context_ucred(ctx));
    }
    slot = nfs_node_access_slot(np, uid, 1);
    np->n_accessuid[slot] = uid;
    microuptime(&now);
    np->n_accessstamp[slot] = now.tv_sec;
    np->n_access[slot] = access_result;

    /* pass back the access returned with this request */
    *access = np->n_access[slot];
nfsmout:
    if (!lockerror)
        nfs_node_unlock(np);
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (error);
}
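
/*
 * The ACCESS result above is cached in one of the node's n_access[] slots,
 * keyed by uid (or by an audit-session-derived uid on Kerberized mounts),
 * so repeated permission checks by the same user can be answered from the
 * cache. As a hypothetical example, a caller checking read permission would
 * set *access = NFS_ACCESS_READ before the call and test that bit in
 * *access afterward.
 */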

int
nfs4_getattr_rpc(
    nfsnode_t np,
    mount_t mp,
    u_char *fhp,
    size_t fhsize,
    int flags,
    vfs_context_t ctx,
    struct nfs_vattr *nvap,
    u_int64_t *xidp)
{
    struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
    int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
    uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
    struct nfsm_chain nmreq, nmrep;
    struct nfsreq_secinfo_args si;

    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

    if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
        nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
        return (0);
    }

    if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
        rpcflags = R_RECOVER;

    if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */
        rpcflags |= R_SOFT;

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH, GETATTR
    numops = 2;
    nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
    if ((flags & NGA_ACL) && acls)
        NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
    nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
        vfs_context_thread(ctx), vfs_context_ucred(ctx),
        NULL, rpcflags, &nmrep, xidp, &status);

    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsmout_if(error);
    error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
    nfsmout_if(error);
    if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
        /* we asked for the ACL but didn't get one... assume there isn't one */
        NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
        nvap->nva_acl = NULL;
    }
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (error);
}

int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
    struct nfsmount *nmp;
    int error = 0, lockerror = ENOENT, status, numops;
    uint32_t len = 0;
    u_int64_t xid;
    struct nfsm_chain nmreq, nmrep;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);
    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH, GETATTR, READLINK
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
    nfsm_chain_get_32(error, &nmrep, len);
    nfsmout_if(error);
    if (len >= *buflenp) {
        if (np->n_size && (np->n_size < *buflenp))
            len = np->n_size;
        else
            len = *buflenp - 1;
    }
    nfsm_chain_get_opaque(error, &nmrep, len, buf);
    if (!error)
        *buflenp = len;
nfsmout:
    if (!lockerror)
        nfs_node_unlock(np);
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (error);
}

int
nfs4_read_rpc_async(
    nfsnode_t np,
    off_t offset,
    size_t len,
    thread_t thd,
    kauth_cred_t cred,
    struct nfsreq_cbinfo *cb,
    struct nfsreq **reqp)
{
    struct nfsmount *nmp;
    int error = 0, nfsvers, numops;
    nfs_stateid stateid;
    struct nfsm_chain nmreq;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);

    // PUTFH, READ, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
    nfs_get_stateid(np, thd, cred, &stateid);
    nfsm_chain_add_stateid(error, &nmreq, &stateid);
    nfsm_chain_add_64(error, &nmreq, offset);
    nfsm_chain_add_32(error, &nmreq, len);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    return (error);
}

int
nfs4_read_rpc_async_finish(
    nfsnode_t np,
    struct nfsreq *req,
    uio_t uio,
    size_t *lenp,
    int *eofp)
{
    struct nfsmount *nmp;
    int error = 0, lockerror, nfsvers, numops, status, eof = 0;
    size_t retlen = 0;
    u_int64_t xid;
    struct nfsm_chain nmrep;

    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp)) {
        nfs_request_async_cancel(req);
        return (ENXIO);
    }
    nfsvers = nmp->nm_vers;

    nfsm_chain_null(&nmrep);

    error = nfs_request_async_finish(req, &nmrep, &xid, &status);
    if (error == EINPROGRESS) /* async request restarted */
        return (error);

    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
    nfsm_chain_get_32(error, &nmrep, eof);
    nfsm_chain_get_32(error, &nmrep, retlen);
    if (!error) {
        *lenp = MIN(retlen, *lenp);
        error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
    }
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
    if (!lockerror)
        nfs_node_unlock(np);
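    /*
     * A zero-length reply that doesn't have the EOF flag set is treated as
     * EOF below, presumably so callers don't loop retrying a read the
     * server will never satisfy.
     */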
    if (eofp) {
        if (!eof && !retlen)
            eof = 1;
        *eofp = eof;
    }
    nfsm_chain_cleanup(&nmrep);
    if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
        microuptime(&np->n_lastio);
    return (error);
}

int
nfs4_write_rpc_async(
    nfsnode_t np,
    uio_t uio,
    size_t len,
    thread_t thd,
    kauth_cred_t cred,
    int iomode,
    struct nfsreq_cbinfo *cb,
    struct nfsreq **reqp)
{
    struct nfsmount *nmp;
    mount_t mp;
    int error = 0, nfsvers, numops;
    nfs_stateid stateid;
    struct nfsm_chain nmreq;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    /* for async mounts, don't bother sending sync write requests */
    if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
        ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
        iomode = NFS_WRITE_UNSTABLE;

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);

    // PUTFH, WRITE, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
    nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
    nfs_get_stateid(np, thd, cred, &stateid);
    nfsm_chain_add_stateid(error, &nmreq, &stateid);
    nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
    nfsm_chain_add_32(error, &nmreq, iomode);
    nfsm_chain_add_32(error, &nmreq, len);
    if (!error)
        error = nfsm_chain_add_uio(&nmreq, uio, len);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);

    error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    return (error);
}

int
nfs4_write_rpc_async_finish(
    nfsnode_t np,
    struct nfsreq *req,
    int *iomodep,
    size_t *rlenp,
    uint64_t *wverfp)
{
    struct nfsmount *nmp;
    int error = 0, lockerror = ENOENT, nfsvers, numops, status;
    int committed = NFS_WRITE_FILESYNC;
    size_t rlen = 0;
    u_int64_t xid, wverf;
    mount_t mp;
    struct nfsm_chain nmrep;

    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp)) {
        nfs_request_async_cancel(req);
        return (ENXIO);
    }
    nfsvers = nmp->nm_vers;

    nfsm_chain_null(&nmrep);

    error = nfs_request_async_finish(req, &nmrep, &xid, &status);
    if (error == EINPROGRESS) /* async request restarted */
        return (error);
    nmp = NFSTONMP(np);
    if (nfs_mount_gone(nmp))
        error = ENXIO;
    if (!error && (lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
    nfsm_chain_get_32(error, &nmrep, rlen);
    nfsmout_if(error);
    *rlenp = rlen;
    if (rlen <= 0)
        error = NFSERR_IO;
    nfsm_chain_get_32(error, &nmrep, committed);
    nfsm_chain_get_64(error, &nmrep, wverf);
    nfsmout_if(error);
    if (wverfp)
        *wverfp = wverf;
    lck_mtx_lock(&nmp->nm_lock);
    if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
        nmp->nm_verf = wverf;
        nmp->nm_state |= NFSSTA_HASWRITEVERF;
    } else if (nmp->nm_verf != wverf) {
        nmp->nm_verf = wverf;
    }
    lck_mtx_unlock(&nmp->nm_lock);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
    if (!lockerror)
        nfs_node_unlock(np);
    nfsm_chain_cleanup(&nmrep);
    if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
        ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
        committed = NFS_WRITE_FILESYNC;
    *iomodep = committed;
    if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
        microuptime(&np->n_lastio);
    return (error);
}
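
/*
 * A note on the write verifier handling above: the server returns an opaque
 * verifier with every WRITE reply, and the verifier changes when the server
 * reboots and loses uncommitted (UNSTABLE) data. The mount records the first
 * verifier it sees (NFSSTA_HASWRITEVERF); a later COMMIT that comes back
 * with a different verifier (see nfs4_commit_rpc() returning
 * NFSERR_STALEWRITEVERF) tells the caller its unstable writes must be
 * re-sent.
 */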

int
nfs4_remove_rpc(
    nfsnode_t dnp,
    char *name,
    int namelen,
    thread_t thd,
    kauth_cred_t cred)
{
    int error = 0, lockerror = ENOENT, remove_error = 0, status;
    struct nfsmount *nmp;
    int nfsvers, numops;
    u_int64_t xid;
    struct nfsm_chain nmreq, nmrep;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(dnp);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);
    NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH, REMOVE, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
    nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
    nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);

    error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(dnp)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
    remove_error = error;
    nfsm_chain_check_change_info(error, &nmrep, dnp);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
    if (error && !lockerror)
        NATTRINVALIDATE(dnp);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);

    if (!lockerror) {
        dnp->n_flag |= NMODIFIED;
        nfs_node_unlock(dnp);
    }
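    /*
     * NFSERR_GRACE means the server is in its post-reboot grace period,
     * accepting only state-reclaim operations, so back off briefly and
     * retry the whole compound.
     */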
    if (error == NFSERR_GRACE) {
        tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
        goto restart;
    }

    return (remove_error);
}

int
nfs4_rename_rpc(
    nfsnode_t fdnp,
    char *fnameptr,
    int fnamelen,
    nfsnode_t tdnp,
    char *tnameptr,
    int tnamelen,
    vfs_context_t ctx)
{
    int error = 0, lockerror = ENOENT, status, nfsvers, numops;
    struct nfsmount *nmp;
    u_int64_t xid, savedxid;
    struct nfsm_chain nmreq, nmrep;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(fdnp);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);
    if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
    numops = 7;
    nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
    nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
    nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
    nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);

    error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
    nfsm_chain_check_change_info(error, &nmrep, fdnp);
    nfsm_chain_check_change_info(error, &nmrep, tdnp);
    /* directory attributes: if we don't get them, make sure to invalidate */
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    savedxid = xid;
    nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
    if (error && !lockerror)
        NATTRINVALIDATE(tdnp);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    xid = savedxid;
    nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
    if (error && !lockerror)
        NATTRINVALIDATE(fdnp);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    if (!lockerror) {
        fdnp->n_flag |= NMODIFIED;
        tdnp->n_flag |= NMODIFIED;
        nfs_node_unlock2(fdnp, tdnp);
    }
    return (error);
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
    struct nfsmount *nmp;
    int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
    int i, status, more_entries = 1, eof, bp_dropped = 0;
    uint32_t nmreaddirsize, nmrsize;
    uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
    uint64_t cookie, lastcookie, xid, savedxid;
    struct nfsm_chain nmreq, nmrep, nmrepsave;
    fhandle_t fh;
    struct nfs_vattr nvattr, *nvattrp;
    struct nfs_dir_buf_header *ndbhp;
    struct direntry *dp;
    char *padstart, padlen;
    const char *tag;
    uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
    struct timeval now;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(dnp);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    nmreaddirsize = nmp->nm_readdirsize;
    nmrsize = nmp->nm_rsize;
    bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
    namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
    rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
    if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);
    NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

    /*
     * Set up attribute request for entries.
     * For READDIRPLUS functionality, get everything.
     * Otherwise, just get what we need for struct direntry.
     */
    if (rdirplus) {
        tag = "readdirplus";
        NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
        NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
    } else {
        tag = "readdir";
        NFS_CLEAR_ATTRIBUTES(entry_attrs);
        NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
        NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
        NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
    }
    NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

    /* lock to protect access to cookie verifier */
    if ((lockerror = nfs_node_lock(dnp)))
        return (lockerror);

    /* determine cookie to use, and move dp to the right offset */
    ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
    dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
    if (ndbhp->ndbh_count) {
        for (i=0; i < ndbhp->ndbh_count-1; i++)
            dp = NFS_DIRENTRY_NEXT(dp);
        cookie = dp->d_seekoff;
        dp = NFS_DIRENTRY_NEXT(dp);
    } else {
        cookie = bp->nb_lblkno;
        /* increment with every buffer read */
        OSAddAtomic64(1, &nfsstats.readdir_bios);
    }
    lastcookie = cookie;

    /*
     * The NFS client is responsible for the "." and ".." entries in the
     * directory. So, we put them at the start of the first buffer.
     * Don't bother for attribute directories.
     */
    if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
        !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
        fh.fh_len = 0;
        fhlen = rdirplus ? fh.fh_len + 1 : 0;
        xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
        /* "." */
        namlen = 1;
        reclen = NFS_DIRENTRY_LEN(namlen + xlen);
        if (xlen)
            bzero(&dp->d_name[namlen+1], xlen);
        dp->d_namlen = namlen;
        strlcpy(dp->d_name, ".", namlen+1);
        dp->d_fileno = dnp->n_vattr.nva_fileid;
        dp->d_type = DT_DIR;
        dp->d_reclen = reclen;
        dp->d_seekoff = 1;
        padstart = dp->d_name + dp->d_namlen + 1 + xlen;
        dp = NFS_DIRENTRY_NEXT(dp);
        padlen = (char*)dp - padstart;
        if (padlen > 0)
            bzero(padstart, padlen);
        if (rdirplus) /* zero out attributes */
            bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));

        /* ".." */
        namlen = 2;
        reclen = NFS_DIRENTRY_LEN(namlen + xlen);
        if (xlen)
            bzero(&dp->d_name[namlen+1], xlen);
        dp->d_namlen = namlen;
        strlcpy(dp->d_name, "..", namlen+1);
        if (dnp->n_parent)
            dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
        else
            dp->d_fileno = dnp->n_vattr.nva_fileid;
        dp->d_type = DT_DIR;
        dp->d_reclen = reclen;
        dp->d_seekoff = 2;
        padstart = dp->d_name + dp->d_namlen + 1 + xlen;
        dp = NFS_DIRENTRY_NEXT(dp);
        padlen = (char*)dp - padstart;
        if (padlen > 0)
            bzero(padstart, padlen);
        if (rdirplus) /* zero out attributes */
            bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));

        ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
        ndbhp->ndbh_count = 2;
    }

    /*
     * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
     * the buffer is full (or we hit EOF). Then put the remainder of the
     * results in the next buffer(s).
     */
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);
    while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {

        // PUTFH, GETATTR, READDIR
        numops = 3;
        nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
        nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
        nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
        nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
        numops--;
        nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
        nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
        nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
        nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
        nfsm_chain_add_32(error, &nmreq, nmrsize);
        nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
        nfsm_chain_build_done(error, &nmreq);
        nfsm_assert(error, (numops == 0), EPROTO);
        nfs_node_unlock(dnp);
        nfsmout_if(error);
        error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

        if ((lockerror = nfs_node_lock(dnp)))
            error = lockerror;

        savedxid = xid;
        nfsm_chain_skip_tag(error, &nmrep);
        nfsm_chain_get_32(error, &nmrep, numops);
        nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
        nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
        nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
        nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
        nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
        nfsm_chain_get_32(error, &nmrep, more_entries);

        if (!lockerror) {
            nfs_node_unlock(dnp);
            lockerror = ENOENT;
        }
        nfsmout_if(error);

        if (rdirplus)
            microuptime(&now);

        /* loop through the entries packing them into the buffer */
        while (more_entries) {
            /* Entry: COOKIE, NAME, FATTR */
            nfsm_chain_get_64(error, &nmrep, cookie);
            nfsm_chain_get_32(error, &nmrep, namlen);
            nfsmout_if(error);
            if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
                /* we've got a big cookie, make sure flag is set */
                lck_mtx_lock(&nmp->nm_lock);
                nmp->nm_state |= NFSSTA_BIGCOOKIES;
                lck_mtx_unlock(&nmp->nm_lock);
                bigcookies = 1;
            }
            /* just truncate names that don't fit in direntry.d_name */
            if (namlen <= 0) {
                error = EBADRPC;
                goto nfsmout;
            }
            if (namlen > (sizeof(dp->d_name)-1)) {
                skiplen = namlen - sizeof(dp->d_name) + 1;
                namlen = sizeof(dp->d_name) - 1;
            } else {
                skiplen = 0;
            }
            /* guess that fh size will be same as parent */
            fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
            xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
            attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
            reclen = NFS_DIRENTRY_LEN(namlen + xlen);
            space_needed = reclen + attrlen;
            space_free = nfs_dir_buf_freespace(bp, rdirplus);
            if (space_needed > space_free) {
                /*
                 * We still have entries to pack, but we've
                 * run out of room in the current buffer.
                 * So we need to move to the next buffer.
                 * The block# for the next buffer is the
                 * last cookie in the current buffer.
                 */
nextbuffer:
                ndbhp->ndbh_flags |= NDB_FULL;
                nfs_buf_release(bp, 0);
                bp_dropped = 1;
                bp = NULL;
                error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
                nfsmout_if(error);
                /* initialize buffer */
                ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
                ndbhp->ndbh_flags = 0;
                ndbhp->ndbh_count = 0;
                ndbhp->ndbh_entry_end = sizeof(*ndbhp);
                ndbhp->ndbh_ncgen = dnp->n_ncgen;
                space_free = nfs_dir_buf_freespace(bp, rdirplus);
                dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
                /* increment with every buffer read */
                OSAddAtomic64(1, &nfsstats.readdir_bios);
            }
            nmrepsave = nmrep;
            dp->d_fileno = cookie; /* placeholder */
            dp->d_seekoff = cookie;
            dp->d_namlen = namlen;
            dp->d_reclen = reclen;
            dp->d_type = DT_UNKNOWN;
            nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
            nfsmout_if(error);
            dp->d_name[namlen] = '\0';
            if (skiplen)
                nfsm_chain_adv(error, &nmrep,
                    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
            nfsmout_if(error);
            nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
            error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
            if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
                /* we do NOT want ACLs returned to us here */
                NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
                if (nvattrp->nva_acl) {
                    kauth_acl_free(nvattrp->nva_acl);
                    nvattrp->nva_acl = NULL;
                }
            }
            if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
                /* OK, we may not have gotten all of the attributes but we will use what we can. */
                if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
                    /* set this up to look like a referral trigger */
                    nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
                }
                error = 0;
            }
            /* check for more entries after this one */
            nfsm_chain_get_32(error, &nmrep, more_entries);
            nfsmout_if(error);

            /* Skip any "." and ".." entries returned from server. */
            /* Also skip any bothersome named attribute entries. */
            if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
                (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
                lastcookie = cookie;
                continue;
            }

            if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
                dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
            if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
                dp->d_fileno = nvattrp->nva_fileid;
            if (rdirplus) {
                /* fileid is already in d_fileno, so stash xid in attrs */
                nvattrp->nva_fileid = savedxid;
                if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
                    fhlen = fh.fh_len + 1;
                    xlen = fhlen + sizeof(time_t);
                    reclen = NFS_DIRENTRY_LEN(namlen + xlen);
                    space_needed = reclen + attrlen;
                    if (space_needed > space_free) {
                        /* didn't actually have the room... move on to next buffer */
                        nmrep = nmrepsave;
                        goto nextbuffer;
                    }
                    /* pack the file handle into the record */
                    dp->d_name[dp->d_namlen+1] = fh.fh_len;
                    bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
                } else {
                    /* mark the file handle invalid */
                    fh.fh_len = 0;
                    fhlen = fh.fh_len + 1;
                    xlen = fhlen + sizeof(time_t);
                    reclen = NFS_DIRENTRY_LEN(namlen + xlen);
                    bzero(&dp->d_name[dp->d_namlen+1], fhlen);
                }
                *(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
                dp->d_reclen = reclen;
            }
            padstart = dp->d_name + dp->d_namlen + 1 + xlen;
            ndbhp->ndbh_count++;
            lastcookie = cookie;

            /* advance to next direntry in buffer */
            dp = NFS_DIRENTRY_NEXT(dp);
            ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
            /* zero out the pad bytes */
            padlen = (char*)dp - padstart;
            if (padlen > 0)
                bzero(padstart, padlen);
        }
        /* Finally, get the eof boolean */
        nfsm_chain_get_32(error, &nmrep, eof);
        nfsmout_if(error);
        if (eof) {
            ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
            nfs_node_lock_force(dnp);
            dnp->n_eofcookie = lastcookie;
            nfs_node_unlock(dnp);
        } else {
            more_entries = 1;
        }
        if (bp_dropped) {
            nfs_buf_release(bp, 0);
            bp = NULL;
            break;
        }
        if ((lockerror = nfs_node_lock(dnp)))
            error = lockerror;
        nfsmout_if(error);
        nfsm_chain_cleanup(&nmrep);
        nfsm_chain_null(&nmreq);
    }
nfsmout:
    if (bp_dropped && bp)
        nfs_buf_release(bp, 0);
    if (!lockerror)
        nfs_node_unlock(dnp);
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
}
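
/*
 * Layout note for the records packed above: with rdirplus, each direntry's
 * d_name area carries extra bytes past the NUL terminator, namely a one-byte
 * file handle length, the file handle data, and a time_t snapshot of when
 * the attributes were received:
 *
 *  d_name[0..namlen-1]     entry name
 *  d_name[namlen]          '\0'
 *  d_name[namlen+1]        fh.fh_len (0 marks an invalid handle)
 *  d_name[namlen+2..]      fh.fh_data
 *  d_name[namlen+1+fhlen]  time_t stamp (now.tv_sec)
 *
 * which is why reclen is computed as NFS_DIRENTRY_LEN(namlen + xlen) with
 * xlen = fhlen + sizeof(time_t).
 */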

int
nfs4_lookup_rpc_async(
    nfsnode_t dnp,
    char *name,
    int namelen,
    vfs_context_t ctx,
    struct nfsreq **reqp)
{
    int error = 0, isdotdot = 0, nfsvers, numops;
    struct nfsm_chain nmreq;
    uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
    struct nfsmount *nmp;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(dnp);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
        isdotdot = 1;
        NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
    } else {
        NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
    }

    nfsm_chain_null(&nmreq);

    // PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
    numops = 5;
    nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
    nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
    numops--;
    if (isdotdot) {
        nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
    } else {
        nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
        nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
    }
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
    /* some ".zfs" directories can't handle being asked for some attributes */
    if ((dnp->n_flag & NISDOTZFS) && !isdotdot)
        NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
    if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot)
        NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
    if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's')))
        NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
        vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    return (error);
}


int
nfs4_lookup_rpc_async_finish(
    nfsnode_t dnp,
    char *name,
    int namelen,
    vfs_context_t ctx,
    struct nfsreq *req,
    u_int64_t *xidp,
    fhandle_t *fhp,
    struct nfs_vattr *nvap)
{
    int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
    uint32_t op = NFS_OP_LOOKUP;
    u_int64_t xid;
    struct nfsmount *nmp;
    struct nfsm_chain nmrep;

    nmp = NFSTONMP(dnp);
    if (nmp == NULL)
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
        isdotdot = 1;

    nfsm_chain_null(&nmrep);

    error = nfs_request_async_finish(req, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(dnp)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    if (xidp)
        *xidp = xid;
    nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

    nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
    nfsmout_if(error || !fhp || !nvap);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
    nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
    if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data))
        error = EBADRPC;
    nfsmout_if(error);
    nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
        /* set this up to look like a referral trigger */
        nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
        error = 0;
    } else {
        nfsmout_if(error);
        error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
    }
nfsmout:
    if (!lockerror)
        nfs_node_unlock(dnp);
    nfsm_chain_cleanup(&nmrep);
    if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
        /* We still need to get SECINFO to set default for mount. */
        /* Do so for the first LOOKUP that returns successfully. */
        struct nfs_sec sec;

        sec.count = NX_MAX_SEC_FLAVORS;
        error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
        /* [sigh] some implementations return "illegal" error for unsupported ops */
        if (error == NFSERR_OP_ILLEGAL)
            error = 0;
        if (!error) {
            /* set our default security flavor to the first in the list */
            lck_mtx_lock(&nmp->nm_lock);
            if (sec.count)
                nmp->nm_auth = sec.flavors[0];
            nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
            lck_mtx_unlock(&nmp->nm_lock);
        }
    }
    return (error);
}

int
nfs4_commit_rpc(
    nfsnode_t np,
    uint64_t offset,
    uint64_t count,
    kauth_cred_t cred,
    uint64_t wverf)
{
    struct nfsmount *nmp;
    int error = 0, lockerror, status, nfsvers, numops;
    u_int64_t xid, newwverf;
    uint32_t count32;
    struct nfsm_chain nmreq, nmrep;
    struct nfsreq_secinfo_args si;

    nmp = NFSTONMP(np);
    FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);
    if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
        return (0);
    nfsvers = nmp->nm_vers;

    if (count > UINT32_MAX)
        count32 = 0;
    else
        count32 = count;

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH, COMMIT, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
    nfsm_chain_add_64(error, &nmreq, offset);
    nfsm_chain_add_32(error, &nmreq, count32);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
        current_thread(), cred, &si, 0, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
    nfsm_chain_get_64(error, &nmrep, newwverf);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
    if (!lockerror)
        nfs_node_unlock(np);
    nfsmout_if(error);
    lck_mtx_lock(&nmp->nm_lock);
    if (nmp->nm_verf != newwverf)
        nmp->nm_verf = newwverf;
    if (wverf != newwverf)
        error = NFSERR_STALEWRITEVERF;
    lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (error);
}

int
nfs4_pathconf_rpc(
    nfsnode_t np,
    struct nfs_fsattr *nfsap,
    vfs_context_t ctx)
{
    u_int64_t xid;
    int error = 0, lockerror, status, nfsvers, numops;
    struct nfsm_chain nmreq, nmrep;
    struct nfsmount *nmp = NFSTONMP(np);
    uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
    struct nfs_vattr nvattr;
    struct nfsreq_secinfo_args si;

    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    NVATTR_INIT(&nvattr);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    /* NFSv4: fetch "pathconf" info for this node */
    // PUTFH, GETATTR
    numops = 2;
    nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
    nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsmout_if(error);
    error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
    nfsmout_if(error);
    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    if (!error)
        nfs_loadattrcache(np, &nvattr, &xid, 0);
    if (!lockerror)
        nfs_node_unlock(np);
nfsmout:
    NVATTR_CLEANUP(&nvattr);
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return (error);
}

int
nfs4_vnop_getattr(
    struct vnop_getattr_args /* {
        struct vnodeop_desc *a_desc;
        vnode_t a_vp;
        struct vnode_attr *a_vap;
        vfs_context_t a_context;
    } */ *ap)
{
    struct vnode_attr *vap = ap->a_vap;
    struct nfsmount *nmp;
    struct nfs_vattr nva;
    int error, acls, ngaflags;

    nmp = VTONMP(ap->a_vp);
    if (nfs_mount_gone(nmp))
        return (ENXIO);
    acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

    ngaflags = NGA_CACHED;
    if (VATTR_IS_ACTIVE(vap, va_acl) && acls)
        ngaflags |= NGA_ACL;
    error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
    if (error)
        return (error);

    vap->va_flags |= VA_64BITOBJIDS;

    /* copy what we have in nva to *a_vap */
    if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
        dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
        VATTR_RETURN(vap, va_rdev, rdev);
    }
    if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
        VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
    if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
        VATTR_RETURN(vap, va_data_size, nva.nva_size);
    // VATTR_RETURN(vap, va_data_alloc, ???);
    // VATTR_RETURN(vap, va_total_size, ???);
    if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
        VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
    if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
        VATTR_RETURN(vap, va_uid, nva.nva_uid);
    if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
        VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
    if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
        VATTR_RETURN(vap, va_gid, nva.nva_gid);
    if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
        VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
            VATTR_RETURN(vap, va_mode, 0777);
        else
            VATTR_RETURN(vap, va_mode, nva.nva_mode);
    }
    if (VATTR_IS_ACTIVE(vap, va_flags) &&
        (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
         NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
         (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
        uint32_t flags = 0;
        if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
            (nva.nva_flags & NFS_FFLAG_ARCHIVED))
            flags |= SF_ARCHIVED;
        if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
            (nva.nva_flags & NFS_FFLAG_HIDDEN))
            flags |= UF_HIDDEN;
        VATTR_RETURN(vap, va_flags, flags);
    }
    if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
        vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
        vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
        VATTR_SET_SUPPORTED(vap, va_create_time);
    }
    if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
        vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
        vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
        VATTR_SET_SUPPORTED(vap, va_access_time);
    }
    if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
        vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
        vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
        VATTR_SET_SUPPORTED(vap, va_modify_time);
    }
    if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
        vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
        vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
        VATTR_SET_SUPPORTED(vap, va_change_time);
    }
    if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
        vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
        vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
        VATTR_SET_SUPPORTED(vap, va_backup_time);
    }
    if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
        VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
    if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
        VATTR_RETURN(vap, va_type, nva.nva_type);
    if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
        VATTR_RETURN(vap, va_filerev, nva.nva_change);

    if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
        VATTR_RETURN(vap, va_acl, nva.nva_acl);
        nva.nva_acl = NULL;
    }

    // other attrs we might support someday:
    // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

    NVATTR_CLEANUP(&nva);
    return (error);
}

int
nfs4_setattr_rpc(
    nfsnode_t np,
    struct vnode_attr *vap,
    vfs_context_t ctx)
{
    struct nfsmount *nmp = NFSTONMP(np);
    int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
    u_int64_t xid, nextxid;
    struct nfsm_chain nmreq, nmrep;
    uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
    uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
    uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
    nfs_stateid stateid;
    struct nfsreq_secinfo_args si;

    if (nfs_mount_gone(nmp))
        return (ENXIO);
    nfsvers = nmp->nm_vers;
    if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
        return (EINVAL);

    if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
        /* we don't support setting unsupported flags (duh!) */
        if (vap->va_active & ~VNODE_ATTR_va_flags)
            return (EINVAL); /* return EINVAL if other attributes also set */
        else
            return (ENOTSUP); /* return ENOTSUP for chflags(2) */
    }

    /* don't bother requesting some changes if they don't look like they are changing */
    if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid))
        VATTR_CLEAR_ACTIVE(vap, va_uid);
    if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid))
        VATTR_CLEAR_ACTIVE(vap, va_gid);
    if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid))
        VATTR_CLEAR_ACTIVE(vap, va_uuuid);
    if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid))
        VATTR_CLEAR_ACTIVE(vap, va_guuid);

tryagain:
    /* do nothing if no attributes will be sent */
    nfs_vattr_set_bitmap(nmp, bitmap, vap);
    if (!bitmap[0] && !bitmap[1])
        return (0);

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    /*
     * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
     * need to invalidate any cached ACL. And if we had an ACL cached,
     * we might as well also fetch the new value.
     */
    NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
    if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
        NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
        if (NACLVALID(np))
            NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
        NACLINVALIDATE(np);
    }

    // PUTFH, SETATTR, GETATTR
    numops = 3;
    nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
    if (VATTR_IS_ACTIVE(vap, va_data_size))
        nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
    else
        stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
    nfsm_chain_add_stateid(error, &nmreq, &stateid);
    nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
    numops--;
    nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
    nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

    if ((lockerror = nfs_node_lock(np)))
        error = lockerror;
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsmout_if(error);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
    nfsmout_if(error == EBADRPC);
    setattr_error = error;
    error = 0;
    bmlen = NFS_ATTR_BITMAP_LEN;
    nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
    if (!error) {
        if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
            microuptime(&np->n_lastio);
        nfs_vattr_set_supported(setbitmap, vap);
        error = setattr_error;
    }
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
    if (error)
        NATTRINVALIDATE(np);
    /*
     * We just changed the attributes and we want to make sure that we
     * see the latest attributes. Get the next XID. If it's not the
     * next XID after the SETATTR XID, then it's possible that another
     * RPC was in flight at the same time and it might put stale attributes
     * in the cache. In that case, we invalidate the attributes and set
     * the attribute cache XID to guarantee that newer attributes will
     * get loaded next.
     */
1610 nextxid = 0;
1611 nfs_get_xid(&nextxid);
1612 if (nextxid != (xid + 1)) {
1613 np->n_xid = nextxid;
1614 NATTRINVALIDATE(np);
1615 }
1616 nfsmout:
1617 if (!lockerror)
1618 nfs_node_unlock(np);
1619 nfsm_chain_cleanup(&nmreq);
1620 nfsm_chain_cleanup(&nmrep);
1621 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1622 /*
1623 * Some servers may not like ACL/mode combos that get sent.
1624 * If it looks like that's what the server choked on, try setting
1625 * just the ACL and not the mode (unless it looks like everything
1626 * but mode was already successfully set).
1627 */
1628 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1629 ((bitmap[1] & (setbitmap[1] | (1 << (NFS_FATTR_MODE - 32)))) != bitmap[1])) {
1630 VATTR_CLEAR_ACTIVE(vap, va_mode);
1631 error = 0;
1632 goto tryagain;
1633 }
1634 }
1635 return (error);
1636 }
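/*
 * Note on the XID check above: nfs_get_xid() hands out monotonically
 * increasing transaction IDs, so drawing an ID other than xid+1 right
 * after the SETATTR reply means another RPC was in flight concurrently;
 * its reply could carry pre-SETATTR attributes, which is why the cache
 * is invalidated and n_xid advanced so only strictly newer replies can
 * repopulate it.
 */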
1637
1638 /*
1639 * Wait for any pending recovery to complete.
1640 */
1641 int
1642 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1643 {
1644 struct timespec ts = { 1, 0 };
1645 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1646
1647 lck_mtx_lock(&nmp->nm_lock);
1648 while (nmp->nm_state & NFSSTA_RECOVER) {
1649 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1650 break;
1651 nfs_mount_sock_thread_wake(nmp);
1652 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1653 slpflag = 0;
1654 }
1655 lck_mtx_unlock(&nmp->nm_lock);
1656
1657 return (error);
1658 }
1659
1660 /*
1661 * We're about to use/manipulate the NFS mount's open/lock state.
1662 * Wait for any pending state recovery to complete, then
1663 * mark the state as being in use (which will hold off
1664 * the recovery thread until we're done).
1665 */
1666 int
1667 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1668 {
1669 struct timespec ts = { 1, 0 };
1670 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1671
1672 if (nfs_mount_gone(nmp))
1673 return (ENXIO);
1674 lck_mtx_lock(&nmp->nm_lock);
1675 if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
1676 lck_mtx_unlock(&nmp->nm_lock);
1677 return (ENXIO);
1678 }
1679 while (nmp->nm_state & NFSSTA_RECOVER) {
1680 if ((error = nfs_sigintr(nmp, NULL, thd, 1)))
1681 break;
1682 nfs_mount_sock_thread_wake(nmp);
1683 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1684 slpflag = 0;
1685 }
1686 if (!error)
1687 nmp->nm_stateinuse++;
1688 lck_mtx_unlock(&nmp->nm_lock);
1689
1690 return (error);
1691 }
1692
1693 /*
1694 * We're done using/manipulating the NFS mount's open/lock
1695 * state. If the given error indicates that recovery should
1696 * be performed, we'll initiate recovery.
1697 */
1698 int
1699 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1700 {
1701 int restart = nfs_mount_state_error_should_restart(error);
1702
1703 if (nfs_mount_gone(nmp))
1704 return (restart);
1705 lck_mtx_lock(&nmp->nm_lock);
1706 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1707 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1708 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1709 nfs_need_recover(nmp, error);
1710 }
1711 if (nmp->nm_stateinuse > 0)
1712 nmp->nm_stateinuse--;
1713 else
1714 panic("NFS mount state in use count underrun");
1715 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
1716 wakeup(&nmp->nm_stateinuse);
1717 lck_mtx_unlock(&nmp->nm_lock);
1718 if (error == NFSERR_GRACE)
1719 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
1720
1721 return (restart);
1722 }
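/*
 * A minimal sketch of the canonical use of the in_use start/end pair
 * above (this is how nfs_vnop_mmap below is structured); the helper
 * some_stateful_op() is hypothetical, standing in for any operation
 * that touches open/lock state.
 */
static int
nfs_in_use_restart_sketch(struct nfsmount *nmp, nfsnode_t np, thread_t thd)
{
	int error;
restart:
	if ((error = nfs_mount_state_in_use_start(nmp, thd)))
		return (error);			/* mount gone or interrupted */
	error = some_stateful_op(np, thd);	/* hypothetical stateful operation */
	if (nfs_mount_state_in_use_end(nmp, error))
		goto restart;			/* recovery was kicked off; redo the op */
	return (error);
}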
1723
1724 /*
1725 * Does the error mean we should restart/redo a state-related operation?
1726 */
1727 int
1728 nfs_mount_state_error_should_restart(int error)
1729 {
1730 switch (error) {
1731 case NFSERR_STALE_STATEID:
1732 case NFSERR_STALE_CLIENTID:
1733 case NFSERR_ADMIN_REVOKED:
1734 case NFSERR_EXPIRED:
1735 case NFSERR_OLD_STATEID:
1736 case NFSERR_BAD_STATEID:
1737 case NFSERR_GRACE:
1738 return (1);
1739 }
1740 return (0);
1741 }
1742
1743 /*
1744 * In some cases we may want to limit how many times we restart a
1745 * state-related operation, e.g. when we're repeatedly getting NFSERR_GRACE.
1746 * Base the limit on the lease (as long as it's not too short).
1747 */
1748 uint
1749 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1750 {
1751 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1752 }
1753
1754 /*
1755 * Does the error mean we probably lost a delegation?
1756 */
1757 int
1758 nfs_mount_state_error_delegation_lost(int error)
1759 {
1760 switch (error) {
1761 case NFSERR_STALE_STATEID:
1762 case NFSERR_ADMIN_REVOKED:
1763 case NFSERR_EXPIRED:
1764 case NFSERR_OLD_STATEID:
1765 case NFSERR_BAD_STATEID:
1766 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1767 return (1);
1768 }
1769 return (0);
1770 }
1771
1772
1773 /*
1774 * Mark an NFS node's open state as busy.
1775 */
1776 int
1777 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1778 {
1779 struct nfsmount *nmp;
1780 struct timespec ts = {2, 0};
1781 int error = 0, slpflag;
1782
1783 nmp = NFSTONMP(np);
1784 if (nfs_mount_gone(nmp))
1785 return (ENXIO);
1786 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1787
1788 lck_mtx_lock(&np->n_openlock);
1789 while (np->n_openflags & N_OPENBUSY) {
1790 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1791 break;
1792 np->n_openflags |= N_OPENWANT;
1793 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1794 slpflag = 0;
1795 }
1796 if (!error)
1797 np->n_openflags |= N_OPENBUSY;
1798 lck_mtx_unlock(&np->n_openlock);
1799
1800 return (error);
1801 }
1802
1803 /*
1804 * Clear an NFS node's open state busy flag and wake up
1805 * anyone wanting it.
1806 */
1807 void
1808 nfs_open_state_clear_busy(nfsnode_t np)
1809 {
1810 int wanted;
1811
1812 lck_mtx_lock(&np->n_openlock);
1813 if (!(np->n_openflags & N_OPENBUSY))
1814 panic("nfs_open_state_clear_busy");
1815 wanted = (np->n_openflags & N_OPENWANT);
1816 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1817 lck_mtx_unlock(&np->n_openlock);
1818 if (wanted)
1819 wakeup(&np->n_openflags);
1820 }
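/*
 * A minimal sketch of how the busy/unbusy pair above brackets open
 * state updates (nfs4_open below follows this pattern around its
 * delegation check); the body comment marks where real work would go.
 */
static int
nfs_open_state_busy_sketch(nfsnode_t np, thread_t thd)
{
	int error;

	if ((error = nfs_open_state_set_busy(np, thd)))
		return (error);
	/* ... examine/update the node's open and delegation state ... */
	nfs_open_state_clear_busy(np);
	return (0);
}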
1821
1822 /*
1823 * Search a mount's open owner list for the owner for this credential.
1824 * If not found and "alloc" is set, then allocate a new one.
1825 */
1826 struct nfs_open_owner *
1827 nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1828 {
1829 uid_t uid = kauth_cred_getuid(cred);
1830 struct nfs_open_owner *noop, *newnoop = NULL;
1831
1832 tryagain:
1833 lck_mtx_lock(&nmp->nm_lock);
1834 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1835 if (kauth_cred_getuid(noop->noo_cred) == uid)
1836 break;
1837 }
1838
1839 if (!noop && !newnoop && alloc) {
1840 lck_mtx_unlock(&nmp->nm_lock);
1841 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1842 if (!newnoop)
1843 return (NULL);
1844 bzero(newnoop, sizeof(*newnoop));
1845 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1846 newnoop->noo_mount = nmp;
1847 kauth_cred_ref(cred);
1848 newnoop->noo_cred = cred;
1849 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1850 TAILQ_INIT(&newnoop->noo_opens);
1851 goto tryagain;
1852 }
1853 if (!noop && newnoop) {
1854 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1855 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1856 noop = newnoop;
1857 }
1858 lck_mtx_unlock(&nmp->nm_lock);
1859
1860 if (newnoop && (noop != newnoop))
1861 nfs_open_owner_destroy(newnoop);
1862
1863 if (noop)
1864 nfs_open_owner_ref(noop);
1865
1866 return (noop);
1867 }
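/*
 * Typical caller pattern for the lookup above, mirroring nfs_vnop_mmap
 * below: the reference taken by _find must be dropped with _rele.
 *
 *	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
 *	if (!noop)
 *		return (ENOMEM);
 *	... use noop ...
 *	nfs_open_owner_rele(noop);
 */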
1868
1869 /*
1870 * destroy an open owner that's no longer needed
1871 */
1872 void
1873 nfs_open_owner_destroy(struct nfs_open_owner *noop)
1874 {
1875 if (noop->noo_cred)
1876 kauth_cred_unref(&noop->noo_cred);
1877 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1878 FREE(noop, M_TEMP);
1879 }
1880
1881 /*
1882 * acquire a reference count on an open owner
1883 */
1884 void
1885 nfs_open_owner_ref(struct nfs_open_owner *noop)
1886 {
1887 lck_mtx_lock(&noop->noo_lock);
1888 noop->noo_refcnt++;
1889 lck_mtx_unlock(&noop->noo_lock);
1890 }
1891
1892 /*
1893 * drop a reference count on an open owner and destroy it if
1894 * it is no longer referenced and no longer on the mount's list.
1895 */
1896 void
1897 nfs_open_owner_rele(struct nfs_open_owner *noop)
1898 {
1899 lck_mtx_lock(&noop->noo_lock);
1900 if (noop->noo_refcnt < 1)
1901 panic("nfs_open_owner_rele: no refcnt");
1902 noop->noo_refcnt--;
1903 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1904 panic("nfs_open_owner_rele: busy");
1905 /* XXX we may potentially want to clean up idle/unused open owner structures */
1906 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1907 lck_mtx_unlock(&noop->noo_lock);
1908 return;
1909 }
1910 /* owner is no longer referenced or linked to mount, so destroy it */
1911 lck_mtx_unlock(&noop->noo_lock);
1912 nfs_open_owner_destroy(noop);
1913 }
1914
1915 /*
1916 * Mark an open owner as busy because we are about to
1917 * start an operation that uses and updates open owner state.
1918 */
1919 int
1920 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1921 {
1922 struct nfsmount *nmp;
1923 struct timespec ts = {2, 0};
1924 int error = 0, slpflag;
1925
1926 nmp = noop->noo_mount;
1927 if (nfs_mount_gone(nmp))
1928 return (ENXIO);
1929 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1930
1931 lck_mtx_lock(&noop->noo_lock);
1932 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1933 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1934 break;
1935 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1936 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
1937 slpflag = 0;
1938 }
1939 if (!error)
1940 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1941 lck_mtx_unlock(&noop->noo_lock);
1942
1943 return (error);
1944 }
1945
1946 /*
1947 * Clear the busy flag on an open owner and wake up anyone waiting
1948 * to mark it busy.
1949 */
1950 void
1951 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1952 {
1953 int wanted;
1954
1955 lck_mtx_lock(&noop->noo_lock);
1956 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1957 panic("nfs_open_owner_clear_busy");
1958 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1959 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1960 lck_mtx_unlock(&noop->noo_lock);
1961 if (wanted)
1962 wakeup(noop);
1963 }
1964
1965 /*
1966 * Given an open/lock owner and an error code, increment the
1967 * sequence ID if appropriate.
1968 */
1969 void
1970 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1971 {
1972 switch (error) {
1973 case NFSERR_STALE_CLIENTID:
1974 case NFSERR_STALE_STATEID:
1975 case NFSERR_OLD_STATEID:
1976 case NFSERR_BAD_STATEID:
1977 case NFSERR_BAD_SEQID:
1978 case NFSERR_BADXDR:
1979 case NFSERR_RESOURCE:
1980 case NFSERR_NOFILEHANDLE:
1981 /* do not increment the open seqid on these errors */
1982 return;
1983 }
1984 if (noop)
1985 noop->noo_seqid++;
1986 if (nlop)
1987 nlop->nlo_seqid++;
1988 }
1989
1990 /*
1991 * Search a node's open file list for any conflicts with this request.
1992 * Also find this open owner's open file structure.
1993 * If not found and "alloc" is set, then allocate one.
1994 */
1995 int
1996 nfs_open_file_find(
1997 nfsnode_t np,
1998 struct nfs_open_owner *noop,
1999 struct nfs_open_file **nofpp,
2000 uint32_t accessMode,
2001 uint32_t denyMode,
2002 int alloc)
2003 {
2004 *nofpp = NULL;
2005 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2006 }
2007
2008 /*
2009 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2010 * if an existing one is not found. This is used in "create" scenarios to
2011 * officially add the provisional nofp to the node once the node is created.
2012 */
2013 int
2014 nfs_open_file_find_internal(
2015 nfsnode_t np,
2016 struct nfs_open_owner *noop,
2017 struct nfs_open_file **nofpp,
2018 uint32_t accessMode,
2019 uint32_t denyMode,
2020 int alloc)
2021 {
2022 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2023
2024 if (!np)
2025 goto alloc;
2026 tryagain:
2027 lck_mtx_lock(&np->n_openlock);
2028 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2029 if (nofp2->nof_owner == noop) {
2030 nofp = nofp2;
2031 if (!accessMode)
2032 break;
2033 }
2034 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2035 /* This request conflicts with an existing open on this client. */
2036 lck_mtx_unlock(&np->n_openlock);
2037 return (EACCES);
2038 }
2039 }
2040
2041 /*
2042 * If this open owner doesn't have an open
2043 * file structure yet, we create one for it.
2044 */
2045 if (!nofp && !*nofpp && !newnofp && alloc) {
2046 lck_mtx_unlock(&np->n_openlock);
2047 alloc:
2048 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2049 if (!newnofp)
2050 return (ENOMEM);
2051 bzero(newnofp, sizeof(*newnofp));
2052 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2053 newnofp->nof_owner = noop;
2054 nfs_open_owner_ref(noop);
2055 newnofp->nof_np = np;
2056 lck_mtx_lock(&noop->noo_lock);
2057 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2058 lck_mtx_unlock(&noop->noo_lock);
2059 if (np)
2060 goto tryagain;
2061 }
2062 if (!nofp) {
2063 if (*nofpp) {
2064 (*nofpp)->nof_np = np;
2065 nofp = *nofpp;
2066 } else {
2067 nofp = newnofp;
2068 }
2069 if (nofp && np)
2070 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2071 }
2072 if (np)
2073 lck_mtx_unlock(&np->n_openlock);
2074
2075 if (alloc && newnofp && (nofp != newnofp))
2076 nfs_open_file_destroy(newnofp);
2077
2078 *nofpp = nofp;
2079 return (nofp ? 0 : ESRCH);
2080 }
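/*
 * A sketch of the provisional-nofp flow described above, assuming a
 * create path (the RPC step in the middle is elided):
 *
 *	struct nfs_open_file *nofp = NULL;
 *	nfs_open_file_find(NULL, noop, &nofp, 0, 0, 1);	// no node yet: allocates a nodeless nofp
 *	... OPEN/CREATE RPC produces the new node np ...
 *	nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);	// adopts the provisional nofp onto np
 */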
2081
2082 /*
2083 * Destroy an open file structure.
2084 */
2085 void
2086 nfs_open_file_destroy(struct nfs_open_file *nofp)
2087 {
2088 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2089 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2090 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2091 nfs_open_owner_rele(nofp->nof_owner);
2092 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2093 FREE(nofp, M_TEMP);
2094 }
2095
2096 /*
2097 * Mark an open file as busy because we are about to
2098 * start an operation that uses and updates open file state.
2099 */
2100 int
2101 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2102 {
2103 struct nfsmount *nmp;
2104 struct timespec ts = {2, 0};
2105 int error = 0, slpflag;
2106
2107 nmp = nofp->nof_owner->noo_mount;
2108 if (nfs_mount_gone(nmp))
2109 return (ENXIO);
2110 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2111
2112 lck_mtx_lock(&nofp->nof_lock);
2113 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2114 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2115 break;
2116 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2117 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2118 slpflag = 0;
2119 }
2120 if (!error)
2121 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2122 lck_mtx_unlock(&nofp->nof_lock);
2123
2124 return (error);
2125 }
2126
2127 /*
2128 * Clear the busy flag on an open file and wake up anyone waiting
2129 * to mark it busy.
2130 */
2131 void
2132 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2133 {
2134 int wanted;
2135
2136 lck_mtx_lock(&nofp->nof_lock);
2137 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
2138 panic("nfs_open_file_clear_busy");
2139 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2140 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
2141 lck_mtx_unlock(&nofp->nof_lock);
2142 if (wanted)
2143 wakeup(nofp);
2144 }
2145
2146 /*
2147 * Add the open state for the given access/deny modes to this open file.
2148 */
2149 void
2150 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2151 {
2152 lck_mtx_lock(&nofp->nof_lock);
2153 nofp->nof_access |= accessMode;
2154 nofp->nof_deny |= denyMode;
2155
2156 if (delegated) {
2157 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2158 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2159 nofp->nof_d_r++;
2160 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2161 nofp->nof_d_w++;
2162 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2163 nofp->nof_d_rw++;
2164 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2165 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2166 nofp->nof_d_r_dw++;
2167 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2168 nofp->nof_d_w_dw++;
2169 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2170 nofp->nof_d_rw_dw++;
2171 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2172 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2173 nofp->nof_d_r_drw++;
2174 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2175 nofp->nof_d_w_drw++;
2176 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2177 nofp->nof_d_rw_drw++;
2178 }
2179 } else {
2180 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2181 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2182 nofp->nof_r++;
2183 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2184 nofp->nof_w++;
2185 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2186 nofp->nof_rw++;
2187 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2188 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2189 nofp->nof_r_dw++;
2190 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2191 nofp->nof_w_dw++;
2192 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2193 nofp->nof_rw_dw++;
2194 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2195 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2196 nofp->nof_r_drw++;
2197 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2198 nofp->nof_w_drw++;
2199 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2200 nofp->nof_rw_drw++;
2201 }
2202 }
2203
2204 nofp->nof_opencnt++;
2205 lck_mtx_unlock(&nofp->nof_lock);
2206 }
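/*
 * Note: the nof_* counters updated above form a 3x3 matrix of access
 * (r, w, rw) by deny (plain = deny-none, _dw = deny-write, _drw =
 * deny-both), each with a delegated twin prefixed nof_d_. For example,
 * nof_rw_dw counts non-delegated opens with ACCESS_BOTH/DENY_WRITE,
 * and nof_opencnt is the grand total across all eighteen counters.
 */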
2207
2208 /*
2209 * Find which particular open combo will be closed and report what
2210 * the new modes will be and whether the open was delegated.
2211 */
2212 void
2213 nfs_open_file_remove_open_find(
2214 struct nfs_open_file *nofp,
2215 uint32_t accessMode,
2216 uint32_t denyMode,
2217 uint32_t *newAccessMode,
2218 uint32_t *newDenyMode,
2219 int *delegated)
2220 {
2221 /*
2222 * Calculate new modes: a mode bit gets removed when the sum of
2223 * all the counts corresponding to that bit is exactly one
2224 */
2225 *newAccessMode = nofp->nof_access;
2226 *newDenyMode = nofp->nof_deny;
2227
2228 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2229 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2230 ((nofp->nof_r + nofp->nof_d_r +
2231 nofp->nof_rw + nofp->nof_d_rw +
2232 nofp->nof_r_dw + nofp->nof_d_r_dw +
2233 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2234 nofp->nof_r_drw + nofp->nof_d_r_drw +
2235 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2236 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2237 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2238 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2239 ((nofp->nof_w + nofp->nof_d_w +
2240 nofp->nof_rw + nofp->nof_d_rw +
2241 nofp->nof_w_dw + nofp->nof_d_w_dw +
2242 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2243 nofp->nof_w_drw + nofp->nof_d_w_drw +
2244 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2245 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2246 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2247 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2248 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2249 nofp->nof_w_drw + nofp->nof_d_w_drw +
2250 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2251 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2252 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2253 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2254 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2255 nofp->nof_w_drw + nofp->nof_d_w_drw +
2256 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2257 nofp->nof_r_dw + nofp->nof_d_r_dw +
2258 nofp->nof_w_dw + nofp->nof_d_w_dw +
2259 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2260 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2261
2262 /* Find the corresponding open access/deny mode counter. */
2263 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2264 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2265 *delegated = (nofp->nof_d_r != 0);
2266 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2267 *delegated = (nofp->nof_d_w != 0);
2268 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2269 *delegated = (nofp->nof_d_rw != 0);
2270 else
2271 *delegated = 0;
2272 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2273 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2274 *delegated = (nofp->nof_d_r_dw != 0);
2275 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2276 *delegated = (nofp->nof_d_w_dw != 0);
2277 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2278 *delegated = (nofp->nof_d_rw_dw != 0);
2279 else
2280 *delegated = 0;
2281 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2282 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2283 *delegated = (nofp->nof_d_r_drw != 0);
2284 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2285 *delegated = (nofp->nof_d_w_drw != 0);
2286 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2287 *delegated = (nofp->nof_d_rw_drw != 0);
2288 else
2289 *delegated = 0;
2290 }
2291 }
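/*
 * Worked example for the "sum == 1" tests above: if the only remaining
 * read-capable open is a single (ACCESS_READ, DENY_WRITE) one (i.e.
 * nof_r_dw == 1 and every other read-bearing counter is zero), the READ
 * sum is 1, so removing that open clears NFS_OPEN_SHARE_ACCESS_READ
 * from *newAccessMode; the deny-write sum is likewise 1, so
 * NFS_OPEN_SHARE_DENY_WRITE drops out of *newDenyMode as well.
 */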
2292
2293 /*
2294 * Remove the open state for the given access/deny modes from this open file.
2295 */
2296 void
2297 nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2298 {
2299 uint32_t newAccessMode, newDenyMode;
2300 int delegated = 0;
2301
2302 lck_mtx_lock(&nofp->nof_lock);
2303 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2304
2305 /* Decrement the corresponding open access/deny mode counter. */
2306 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2307 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2308 if (delegated) {
2309 if (nofp->nof_d_r == 0)
2310 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2311 else
2312 nofp->nof_d_r--;
2313 } else {
2314 if (nofp->nof_r == 0)
2315 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2316 else
2317 nofp->nof_r--;
2318 }
2319 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2320 if (delegated) {
2321 if (nofp->nof_d_w == 0)
2322 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2323 else
2324 nofp->nof_d_w--;
2325 } else {
2326 if (nofp->nof_w == 0)
2327 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2328 else
2329 nofp->nof_w--;
2330 }
2331 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2332 if (delegated) {
2333 if (nofp->nof_d_rw == 0)
2334 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2335 else
2336 nofp->nof_d_rw--;
2337 } else {
2338 if (nofp->nof_rw == 0)
2339 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2340 else
2341 nofp->nof_rw--;
2342 }
2343 }
2344 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2345 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2346 if (delegated) {
2347 if (nofp->nof_d_r_dw == 0)
2348 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2349 else
2350 nofp->nof_d_r_dw--;
2351 } else {
2352 if (nofp->nof_r_dw == 0)
2353 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2354 else
2355 nofp->nof_r_dw--;
2356 }
2357 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2358 if (delegated) {
2359 if (nofp->nof_d_w_dw == 0)
2360 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2361 else
2362 nofp->nof_d_w_dw--;
2363 } else {
2364 if (nofp->nof_w_dw == 0)
2365 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2366 else
2367 nofp->nof_w_dw--;
2368 }
2369 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2370 if (delegated) {
2371 if (nofp->nof_d_rw_dw == 0)
2372 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2373 else
2374 nofp->nof_d_rw_dw--;
2375 } else {
2376 if (nofp->nof_rw_dw == 0)
2377 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2378 else
2379 nofp->nof_rw_dw--;
2380 }
2381 }
2382 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2383 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2384 if (delegated) {
2385 if (nofp->nof_d_r_drw == 0)
2386 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2387 else
2388 nofp->nof_d_r_drw--;
2389 } else {
2390 if (nofp->nof_r_drw == 0)
2391 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2392 else
2393 nofp->nof_r_drw--;
2394 }
2395 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2396 if (delegated) {
2397 if (nofp->nof_d_w_drw == 0)
2398 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2399 else
2400 nofp->nof_d_w_drw--;
2401 } else {
2402 if (nofp->nof_w_drw == 0)
2403 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2404 else
2405 nofp->nof_w_drw--;
2406 }
2407 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2408 if (delegated) {
2409 if (nofp->nof_d_rw_drw == 0)
2410 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2411 else
2412 nofp->nof_d_rw_drw--;
2413 } else {
2414 if (nofp->nof_rw_drw == 0)
2415 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2416 else
2417 nofp->nof_rw_drw--;
2418 }
2419 }
2420 }
2421
2422 /* update the modes */
2423 nofp->nof_access = newAccessMode;
2424 nofp->nof_deny = newDenyMode;
2425 nofp->nof_opencnt--;
2426 lck_mtx_unlock(&nofp->nof_lock);
2427 }
2428
2429
2430 /*
2431 * Get the current (delegation, lock, open, default) stateid for this node.
2432 * If the node has a delegation, use that stateid.
2433 * Otherwise, if the pid holds a lock, use the lock owner's stateid.
2434 * Otherwise, use the open file's stateid.
2435 * If there is no open file, use a default stateid of all ones.
2436 */
2437 void
2438 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2439 {
2440 struct nfsmount *nmp = NFSTONMP(np);
2441 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2442 struct nfs_open_owner *noop = NULL;
2443 struct nfs_open_file *nofp = NULL;
2444 struct nfs_lock_owner *nlop = NULL;
2445 nfs_stateid *s = NULL;
2446
2447 if (np->n_openflags & N_DELEG_MASK) {
2448 s = &np->n_dstateid;
2449 } else {
2450 if (p)
2451 nlop = nfs_lock_owner_find(np, p, 0);
2452 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2453 /* we hold locks, use lock stateid */
2454 s = &nlop->nlo_stateid;
2455 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2456 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2457 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2458 nofp->nof_access) {
2459 /* we (should) have the file open, use open stateid */
2460 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
2461 nfs4_reopen(nofp, thd);
2462 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2463 s = &nofp->nof_stateid;
2464 }
2465 }
2466
2467 if (s) {
2468 sid->seqid = s->seqid;
2469 sid->other[0] = s->other[0];
2470 sid->other[1] = s->other[1];
2471 sid->other[2] = s->other[2];
2472 } else {
2473 /* named attributes may not have a stateid for reads, so don't complain for them */
2474 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
2475 NP(np, "nfs_get_stateid: no stateid");
2476 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2477 }
2478 if (nlop)
2479 nfs_lock_owner_rele(nlop);
2480 if (noop)
2481 nfs_open_owner_rele(noop);
2482 }
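/*
 * Note: the all-ones fallback above matches NFSv4's "special stateid"
 * of all bits set (see the special stateids discussion in RFC 7530),
 * which directs the server to bypass normal stateid checking, e.g. for
 * READs issued with no open state.
 */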
2483
2484
2485 /*
2486 * When we have a delegation, we may be able to perform the OPEN locally.
2487 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2488 */
2489 int
2490 nfs4_open_delegated(
2491 nfsnode_t np,
2492 struct nfs_open_file *nofp,
2493 uint32_t accessMode,
2494 uint32_t denyMode,
2495 vfs_context_t ctx)
2496 {
2497 int error = 0, ismember, readtoo = 0, authorized = 0;
2498 uint32_t action;
2499 struct kauth_acl_eval eval;
2500 kauth_cred_t cred = vfs_context_ucred(ctx);
2501
2502 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2503 /*
2504 * Try to open it for read access too,
2505 * so the buffer cache can read data.
2506 */
2507 readtoo = 1;
2508 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2509 }
2510
2511 tryagain:
2512 action = 0;
2513 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ)
2514 action |= KAUTH_VNODE_READ_DATA;
2515 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE)
2516 action |= KAUTH_VNODE_WRITE_DATA;
2517
2518 /* evaluate ACE (if we have one) */
2519 if (np->n_dace.ace_flags) {
2520 eval.ae_requested = action;
2521 eval.ae_acl = &np->n_dace;
2522 eval.ae_count = 1;
2523 eval.ae_options = 0;
2524 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred))
2525 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2526 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2527 if (!error && ismember)
2528 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2529
2530 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2531 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2532 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2533 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2534
2535 error = kauth_acl_evaluate(cred, &eval);
2536
2537 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW))
2538 authorized = 1;
2539 }
2540
2541 if (!authorized) {
2542 /* need to ask the server via ACCESS */
2543 struct vnop_access_args naa;
2544 naa.a_desc = &vnop_access_desc;
2545 naa.a_vp = NFSTOV(np);
2546 naa.a_action = action;
2547 naa.a_context = ctx;
2548 if (!(error = nfs_vnop_access(&naa)))
2549 authorized = 1;
2550 }
2551
2552 if (!authorized) {
2553 if (readtoo) {
2554 /* try again without the extra read access */
2555 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2556 readtoo = 0;
2557 goto tryagain;
2558 }
2559 return (error ? error : EACCES);
2560 }
2561
2562 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2563
2564 return (0);
2565 }
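/*
 * Note: the flow above first tries to authorize the open purely from
 * the ACE cached with the delegation (np->n_dace) and only falls back
 * to an over-the-wire ACCESS check when that doesn't grant the
 * requested bits; either way no OPEN RPC is needed, which is the whole
 * point of holding the delegation.
 */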
2566
2567
2568 /*
2569 * Open a file with the given access/deny modes.
2570 *
2571 * If we have a delegation, we may be able to handle the open locally.
2572 * Otherwise, we will always send the open RPC even if this open's mode is
2573 * a subset of all the existing opens. This makes sure that we will always
2574 * be able to do a downgrade to any of the open modes.
2575 *
2576 * Note: local conflicts should have already been checked in nfs_open_file_find().
2577 */
2578 int
2579 nfs4_open(
2580 nfsnode_t np,
2581 struct nfs_open_file *nofp,
2582 uint32_t accessMode,
2583 uint32_t denyMode,
2584 vfs_context_t ctx)
2585 {
2586 vnode_t vp = NFSTOV(np);
2587 vnode_t dvp = NULL;
2588 struct componentname cn;
2589 const char *vname = NULL;
2590 size_t namelen;
2591 char smallname[128];
2592 char *filename = NULL;
2593 int error = 0, readtoo = 0;
2594
2595 /*
2596 * We can handle the OPEN ourselves if we have a delegation,
2597 * unless it's a read delegation and the open is asking for
2598 * either write access or deny read. We also don't bother to
2599 * use the delegation if it's being returned.
2600 */
2601 if (np->n_openflags & N_DELEG_MASK) {
2602 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
2603 return (error);
2604 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2605 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2606 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2607 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2608 nfs_open_state_clear_busy(np);
2609 return (error);
2610 }
2611 nfs_open_state_clear_busy(np);
2612 }
2613
2614 /*
2615 * [sigh] We can't trust VFS to get the parent right for named
2616 * attribute nodes. (It likes to reparent the nodes after we've
2617 * created them.) Luckily we can probably get the right parent
2618 * from the n_parent we have stashed away.
2619 */
2620 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2621 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
2622 dvp = NULL;
2623 if (!dvp)
2624 dvp = vnode_getparent(vp);
2625 vname = vnode_getname(vp);
2626 if (!dvp || !vname) {
2627 if (!error)
2628 error = EIO;
2629 goto out;
2630 }
2631 filename = &smallname[0];
2632 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2633 if (namelen >= sizeof(smallname)) {
2634 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
2635 if (!filename) {
2636 error = ENOMEM;
2637 goto out;
2638 }
2639 snprintf(filename, namelen+1, "%s", vname);
2640 }
2641 bzero(&cn, sizeof(cn));
2642 cn.cn_nameptr = filename;
2643 cn.cn_namelen = namelen;
2644
2645 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2646 /*
2647 * Try to open it for read access too,
2648 * so the buffer cache can read data.
2649 */
2650 readtoo = 1;
2651 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2652 }
2653 tryagain:
2654 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2655 if (error) {
2656 if (!nfs_mount_state_error_should_restart(error) &&
2657 (error != EINTR) && (error != ERESTART) && readtoo) {
2658 /* try again without the extra read access */
2659 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2660 readtoo = 0;
2661 goto tryagain;
2662 }
2663 goto out;
2664 }
2665 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2666 out:
2667 if (filename && (filename != &smallname[0]))
2668 FREE(filename, M_TEMP);
2669 if (vname)
2670 vnode_putname(vname);
2671 if (dvp != NULLVP)
2672 vnode_put(dvp);
2673 return (error);
2674 }
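/*
 * Note: sending the OPEN RPC even when the mode is a subset of
 * existing opens keeps each per-mode counter (nof_r, nof_w, nof_rw,
 * ...) backed by a real server-side open, so a later downgrade to any
 * remaining access/deny combination is always possible.
 */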
2675
2676 int
2677 nfs_vnop_mmap(
2678 struct vnop_mmap_args /* {
2679 struct vnodeop_desc *a_desc;
2680 vnode_t a_vp;
2681 int a_fflags;
2682 vfs_context_t a_context;
2683 } */ *ap)
2684 {
2685 vfs_context_t ctx = ap->a_context;
2686 vnode_t vp = ap->a_vp;
2687 nfsnode_t np = VTONFS(vp);
2688 int error = 0, accessMode, denyMode, delegated;
2689 struct nfsmount *nmp;
2690 struct nfs_open_owner *noop = NULL;
2691 struct nfs_open_file *nofp = NULL;
2692
2693 nmp = VTONMP(vp);
2694 if (nfs_mount_gone(nmp))
2695 return (ENXIO);
2696
2697 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
2698 return (EINVAL);
2699 if (np->n_flag & NREVOKE)
2700 return (EIO);
2701
2702 /*
2703 * fflags contains some combination of: PROT_READ, PROT_WRITE
2704 * Since it's not possible to mmap() without having the file open for reading,
2705 * read access is always there (regardless of whether PROT_READ is set).
2706 */
2707 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2708 if (ap->a_fflags & PROT_WRITE)
2709 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2710 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2711
2712 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2713 if (!noop)
2714 return (ENOMEM);
2715
2716 restart:
2717 error = nfs_mount_state_in_use_start(nmp, NULL);
2718 if (error) {
2719 nfs_open_owner_rele(noop);
2720 return (error);
2721 }
2722 if (np->n_flag & NREVOKE) {
2723 error = EIO;
2724 nfs_mount_state_in_use_end(nmp, 0);
2725 nfs_open_owner_rele(noop);
2726 return (error);
2727 }
2728
2729 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2730 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2731 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2732 error = EPERM;
2733 }
2734 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2735 nfs_mount_state_in_use_end(nmp, 0);
2736 error = nfs4_reopen(nofp, NULL);
2737 nofp = NULL;
2738 if (!error)
2739 goto restart;
2740 }
2741 if (!error)
2742 error = nfs_open_file_set_busy(nofp, NULL);
2743 if (error) {
2744 nofp = NULL;
2745 goto out;
2746 }
2747
2748 /*
2749 * The open reference for mmap must mirror an existing open because
2750 * we may need to reclaim it after the file is closed.
2751 * So grab another open count matching the accessMode passed in.
2752 * If we already had an mmap open, prefer read/write without deny mode.
2753 * This means we may have to drop the current mmap open first.
2754 *
2755 * N.B. We should have an open for the mmap, because mmap was
2756 * called on an open descriptor, or we've created an open for read
2757 * from reading the first page for execve. However, if we piggybacked
2758 * on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE open,
2759 * that open may have closed.
2760 */
2761
2762 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2763 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2764 /* We shouldn't get here. We've already opened the file for execve */
2765 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2766 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2767 }
2768 /*
2769 * mmappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2770 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2771 */
2772 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2773 /* not asking for just read access -> fail */
2774 error = EPERM;
2775 goto out;
2776 }
2777 /* we don't have the file open, so open it for read access */
2778 if (nmp->nm_vers < NFS_VER4) {
2779 /* NFS v2/v3 opens are always allowed - so just add it. */
2780 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
2781 error = 0;
2782 } else {
2783 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
2784 }
2785 if (!error)
2786 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
2787 if (error)
2788 goto out;
2789 }
2790
2791 /* determine deny mode for open */
2792 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2793 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2794 delegated = 1;
2795 if (nofp->nof_d_rw)
2796 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2797 else if (nofp->nof_d_rw_dw)
2798 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2799 else if (nofp->nof_d_rw_drw)
2800 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2801 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2802 delegated = 0;
2803 if (nofp->nof_rw)
2804 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2805 else if (nofp->nof_rw_dw)
2806 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2807 else if (nofp->nof_rw_drw)
2808 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2809 } else {
2810 error = EPERM;
2811 }
2812 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2813 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
2814 delegated = 1;
2815 if (nofp->nof_d_r)
2816 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2817 else if (nofp->nof_d_r_dw)
2818 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2819 else if (nofp->nof_d_r_drw)
2820 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2821 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
2822 delegated = 0;
2823 if (nofp->nof_r)
2824 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2825 else if (nofp->nof_r_dw)
2826 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2827 else if (nofp->nof_r_drw)
2828 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2829 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2830 /*
2831 * This clause and the one below co-opt a read/write access
2832 * for a read-only mmapping. We probably got here because an
2833 * existing rw open for an executable file already exists.
2834 */
2835 delegated = 1;
2836 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2837 if (nofp->nof_d_rw)
2838 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2839 else if (nofp->nof_d_rw_dw)
2840 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2841 else if (nofp->nof_d_rw_drw)
2842 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2843 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2844 delegated = 0;
2845 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2846 if (nofp->nof_rw)
2847 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2848 else if (nofp->nof_rw_dw)
2849 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2850 else if (nofp->nof_rw_drw)
2851 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2852 } else {
2853 error = EPERM;
2854 }
2855 }
2856 if (error) /* mmap mode without proper open mode */
2857 goto out;
2858
2859 /*
2860 * If the existing mmap access is more than the new access OR the
2861 * existing access is the same and the existing deny mode is less,
2862 * then we'll stick with the existing mmap open mode.
2863 */
2864 if ((nofp->nof_mmap_access > accessMode) ||
2865 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
2866 goto out;
2867
2868 /* update mmap open mode */
2869 if (nofp->nof_mmap_access) {
2870 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2871 if (error) {
2872 if (!nfs_mount_state_error_should_restart(error))
2873 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2874 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2875 goto out;
2876 }
2877 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2878 }
2879
2880 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
2881 nofp->nof_mmap_access = accessMode;
2882 nofp->nof_mmap_deny = denyMode;
2883
2884 out:
2885 if (nofp)
2886 nfs_open_file_clear_busy(nofp);
2887 if (nfs_mount_state_in_use_end(nmp, error)) {
2888 nofp = NULL;
2889 goto restart;
2890 }
2891 if (noop)
2892 nfs_open_owner_rele(noop);
2893
2894 if (!error) {
2895 int ismapped = 0;
2896 nfs_node_lock_force(np);
2897 if ((np->n_flag & NISMAPPED) == 0) {
2898 np->n_flag |= NISMAPPED;
2899 ismapped = 1;
2900 }
2901 nfs_node_unlock(np);
2902 if (ismapped) {
2903 lck_mtx_lock(&nmp->nm_lock);
2904 nmp->nm_state &= ~NFSSTA_SQUISHY;
2905 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
2906 if (nmp->nm_curdeadtimeout <= 0)
2907 nmp->nm_deadto_start = 0;
2908 nmp->nm_mappers++;
2909 lck_mtx_unlock(&nmp->nm_lock);
2910 }
2911 }
2912
2913 return (error);
2914 }
2915
2916
2917 int
2918 nfs_vnop_mnomap(
2919 struct vnop_mnomap_args /* {
2920 struct vnodeop_desc *a_desc;
2921 vnode_t a_vp;
2922 vfs_context_t a_context;
2923 } */ *ap)
2924 {
2925 vfs_context_t ctx = ap->a_context;
2926 vnode_t vp = ap->a_vp;
2927 nfsnode_t np = VTONFS(vp);
2928 struct nfsmount *nmp;
2929 struct nfs_open_file *nofp = NULL;
2930 off_t size;
2931 int error;
2932 int is_mapped_flag = 0;
2933
2934 nmp = VTONMP(vp);
2935 if (nfs_mount_gone(nmp))
2936 return (ENXIO);
2937
2938 nfs_node_lock_force(np);
2939 if (np->n_flag & NISMAPPED) {
2940 is_mapped_flag = 1;
2941 np->n_flag &= ~NISMAPPED;
2942 }
2943 nfs_node_unlock(np);
2944 if (is_mapped_flag) {
2945 lck_mtx_lock(&nmp->nm_lock);
2946 if (nmp->nm_mappers)
2947 nmp->nm_mappers--;
2948 else
2949 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2950 lck_mtx_unlock(&nmp->nm_lock);
2951 }
2952
2953 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2954 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
2955 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
2956 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
2957
2958 /* walk all open files and close all mmap opens */
2959 loop:
2960 error = nfs_mount_state_in_use_start(nmp, NULL);
2961 if (error)
2962 return (error);
2963 lck_mtx_lock(&np->n_openlock);
2964 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
2965 if (!nofp->nof_mmap_access)
2966 continue;
2967 lck_mtx_unlock(&np->n_openlock);
2968 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2969 nfs_mount_state_in_use_end(nmp, 0);
2970 error = nfs4_reopen(nofp, NULL);
2971 if (!error)
2972 goto loop;
2973 }
2974 if (!error)
2975 error = nfs_open_file_set_busy(nofp, NULL);
2976 if (error) {
2977 lck_mtx_lock(&np->n_openlock);
2978 break;
2979 }
2980 if (nofp->nof_mmap_access) {
2981 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2982 if (!nfs_mount_state_error_should_restart(error)) {
2983 if (error) /* not a state-operation-restarting error, so just clear the access */
2984 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2985 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2986 }
2987 if (error)
2988 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2989 }
2990 nfs_open_file_clear_busy(nofp);
2991 nfs_mount_state_in_use_end(nmp, error);
2992 goto loop;
2993 }
2994 lck_mtx_unlock(&np->n_openlock);
2995 nfs_mount_state_in_use_end(nmp, error);
2996 return (error);
2997 }
2998
2999 /*
3000 * Search a node's lock owner list for the owner for this process.
3001 * If not found and "alloc" is set, then allocate a new one.
3002 */
3003 struct nfs_lock_owner *
3004 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3005 {
3006 pid_t pid = proc_pid(p);
3007 struct nfs_lock_owner *nlop, *newnlop = NULL;
3008
3009 tryagain:
3010 lck_mtx_lock(&np->n_openlock);
3011 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3012 if (nlop->nlo_pid != pid)
3013 continue;
3014 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
3015 break;
3016 /* stale lock owner... reuse it if we can */
3017 if (nlop->nlo_refcnt) {
3018 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3019 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3020 lck_mtx_unlock(&np->n_openlock);
3021 goto tryagain;
3022 }
3023 nlop->nlo_pid_start = p->p_start;
3024 nlop->nlo_seqid = 0;
3025 nlop->nlo_stategenid = 0;
3026 break;
3027 }
3028
3029 if (!nlop && !newnlop && alloc) {
3030 lck_mtx_unlock(&np->n_openlock);
3031 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3032 if (!newnlop)
3033 return (NULL);
3034 bzero(newnlop, sizeof(*newnlop));
3035 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3036 newnlop->nlo_pid = pid;
3037 newnlop->nlo_pid_start = p->p_start;
3038 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3039 TAILQ_INIT(&newnlop->nlo_locks);
3040 goto tryagain;
3041 }
3042 if (!nlop && newnlop) {
3043 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3044 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3045 nlop = newnlop;
3046 }
3047 lck_mtx_unlock(&np->n_openlock);
3048
3049 if (newnlop && (nlop != newnlop))
3050 nfs_lock_owner_destroy(newnlop);
3051
3052 if (nlop)
3053 nfs_lock_owner_ref(nlop);
3054
3055 return (nlop);
3056 }
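/*
 * Note: lock owners are matched on both the pid and the process start
 * time, so a recycled pid from a new process never inherits a dead
 * process's lock owner; a stale entry is reset in place when idle
 * (refcnt == 0), or unlinked above so a fresh owner can be allocated.
 */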
3057
3058 /*
3059 * destroy a lock owner that's no longer needed
3060 */
3061 void
3062 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3063 {
3064 if (nlop->nlo_open_owner) {
3065 nfs_open_owner_rele(nlop->nlo_open_owner);
3066 nlop->nlo_open_owner = NULL;
3067 }
3068 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3069 FREE(nlop, M_TEMP);
3070 }
3071
3072 /*
3073 * acquire a reference count on a lock owner
3074 */
3075 void
3076 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3077 {
3078 lck_mtx_lock(&nlop->nlo_lock);
3079 nlop->nlo_refcnt++;
3080 lck_mtx_unlock(&nlop->nlo_lock);
3081 }
3082
3083 /*
3084 * drop a reference count on a lock owner and destroy it if
3085 * it is no longer referenced and no longer on the mount's list.
3086 */
3087 void
3088 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3089 {
3090 lck_mtx_lock(&nlop->nlo_lock);
3091 if (nlop->nlo_refcnt < 1)
3092 panic("nfs_lock_owner_rele: no refcnt");
3093 nlop->nlo_refcnt--;
3094 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3095 panic("nfs_lock_owner_rele: busy");
3096 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3097 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3098 lck_mtx_unlock(&nlop->nlo_lock);
3099 return;
3100 }
3101 /* owner is no longer referenced or linked to mount, so destroy it */
3102 lck_mtx_unlock(&nlop->nlo_lock);
3103 nfs_lock_owner_destroy(nlop);
3104 }
3105
3106 /*
3107 * Mark a lock owner as busy because we are about to
3108 * start an operation that uses and updates lock owner state.
3109 */
3110 int
3111 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3112 {
3113 struct nfsmount *nmp;
3114 struct timespec ts = {2, 0};
3115 int error = 0, slpflag;
3116
3117 nmp = nlop->nlo_open_owner->noo_mount;
3118 if (nfs_mount_gone(nmp))
3119 return (ENXIO);
3120 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3121
3122 lck_mtx_lock(&nlop->nlo_lock);
3123 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3124 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
3125 break;
3126 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3127 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3128 slpflag = 0;
3129 }
3130 if (!error)
3131 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3132 lck_mtx_unlock(&nlop->nlo_lock);
3133
3134 return (error);
3135 }
3136
3137 /*
3138 * Clear the busy flag on a lock owner and wake up anyone waiting
3139 * to mark it busy.
3140 */
3141 void
3142 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3143 {
3144 int wanted;
3145
3146 lck_mtx_lock(&nlop->nlo_lock);
3147 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3148 panic("nfs_lock_owner_clear_busy");
3149 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3150 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
3151 lck_mtx_unlock(&nlop->nlo_lock);
3152 if (wanted)
3153 wakeup(nlop);
3154 }
3155
3156 /*
3157 * Insert a held lock into a lock owner's sorted list.
3158 * (flock locks are always inserted at the head of the list)
3159 */
3160 void
3161 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3162 {
3163 struct nfs_file_lock *nflp;
3164
3165 /* insert new lock in lock owner's held lock list */
3166 lck_mtx_lock(&nlop->nlo_lock);
3167 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3168 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3169 } else {
3170 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3171 if (newnflp->nfl_start < nflp->nfl_start)
3172 break;
3173 }
3174 if (nflp)
3175 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3176 else
3177 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3178 }
3179 lck_mtx_unlock(&nlop->nlo_lock);
3180 }
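/*
 * Worked example: with POSIX locks already held starting at offsets 0,
 * 100, and 400, inserting a lock starting at 200 walks to the first
 * entry whose nfl_start exceeds 200 (the one at 400) and inserts before
 * it, keeping the list sorted by start offset; an flock-style lock goes
 * straight to the head regardless of its range.
 */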
3181
3182 /*
3183 * Get a file lock structure for this lock owner.
3184 */
3185 struct nfs_file_lock *
3186 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3187 {
3188 struct nfs_file_lock *nflp = NULL;
3189
3190 lck_mtx_lock(&nlop->nlo_lock);
3191 if (!nlop->nlo_alock.nfl_owner) {
3192 nflp = &nlop->nlo_alock;
3193 nflp->nfl_owner = nlop;
3194 }
3195 lck_mtx_unlock(&nlop->nlo_lock);
3196 if (!nflp) {
3197 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3198 if (!nflp)
3199 return (NULL);
3200 bzero(nflp, sizeof(*nflp));
3201 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3202 nflp->nfl_owner = nlop;
3203 }
3204 nfs_lock_owner_ref(nlop);
3205 return (nflp);
3206 }
3207
3208 /*
3209 * destroy the given NFS file lock structure
3210 */
3211 void
3212 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3213 {
3214 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3215
3216 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3217 nflp->nfl_owner = NULL;
3218 FREE(nflp, M_TEMP);
3219 } else {
3220 lck_mtx_lock(&nlop->nlo_lock);
3221 bzero(nflp, sizeof(*nflp));
3222 lck_mtx_unlock(&nlop->nlo_lock);
3223 }
3224 nfs_lock_owner_rele(nlop);
3225 }
3226
3227 /*
3228 * Check if one file lock conflicts with another.
3229 * (nflp1 is the new lock. nflp2 is the existing lock.)
3230 */
3231 int
3232 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3233 {
3234 /* no conflict if lock is dead */
3235 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
3236 return (0);
3237 /* no conflict if it's ours - unless the lock style doesn't match */
3238 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3239 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3240 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3241 (nflp1->nfl_start > nflp2->nfl_start) &&
3242 (nflp1->nfl_end < nflp2->nfl_end))
3243 *willsplit = 1;
3244 return (0);
3245 }
3246 /* no conflict if ranges don't overlap */
3247 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
3248 return (0);
3249 /* no conflict if neither lock is exclusive */
3250 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
3251 return (0);
3252 /* conflict */
3253 return (1);
3254 }
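/*
 * Worked examples: a new write lock over [10,20] against another
 * owner's read lock over [15,30] overlaps with an exclusive lock
 * involved, so it conflicts. The same owner's write lock [12,18]
 * strictly inside its own read lock [10,20] (same lock style) does not
 * conflict, but *willsplit is set because the middle of the existing
 * lock changes type and will have to be split.
 */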
3255
3256 /*
3257 * Send an NFSv4 LOCK RPC to the server.
3258 */
3259 int
3260 nfs4_setlock_rpc(
3261 nfsnode_t np,
3262 struct nfs_open_file *nofp,
3263 struct nfs_file_lock *nflp,
3264 int reclaim,
3265 int flags,
3266 thread_t thd,
3267 kauth_cred_t cred)
3268 {
3269 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3270 struct nfsmount *nmp;
3271 struct nfsm_chain nmreq, nmrep;
3272 uint64_t xid;
3273 uint32_t locktype;
3274 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3275 struct nfsreq_secinfo_args si;
3276
3277 nmp = NFSTONMP(np);
3278 if (nfs_mount_gone(nmp))
3279 return (ENXIO);
3280 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3281 return (EINVAL);
3282
3283 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3284 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3285 ((nflp->nfl_type == F_WRLCK) ?
3286 NFS_LOCK_TYPE_WRITEW :
3287 NFS_LOCK_TYPE_READW) :
3288 ((nflp->nfl_type == F_WRLCK) ?
3289 NFS_LOCK_TYPE_WRITE :
3290 NFS_LOCK_TYPE_READ);
3291 if (newlocker) {
3292 error = nfs_open_file_set_busy(nofp, thd);
3293 if (error)
3294 return (error);
3295 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3296 if (error) {
3297 nfs_open_file_clear_busy(nofp);
3298 return (error);
3299 }
3300 if (!nlop->nlo_open_owner) {
3301 nfs_open_owner_ref(nofp->nof_owner);
3302 nlop->nlo_open_owner = nofp->nof_owner;
3303 }
3304 }
3305 error = nfs_lock_owner_set_busy(nlop, thd);
3306 if (error) {
3307 if (newlocker) {
3308 nfs_open_owner_clear_busy(nofp->nof_owner);
3309 nfs_open_file_clear_busy(nofp);
3310 }
3311 return (error);
3312 }
3313
3314 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3315 nfsm_chain_null(&nmreq);
3316 nfsm_chain_null(&nmrep);
3317
3318 // PUTFH, GETATTR, LOCK
3319 numops = 3;
3320 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3321 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3322 numops--;
3323 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3324 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3325 numops--;
3326 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3327 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3328 numops--;
3329 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3330 nfsm_chain_add_32(error, &nmreq, locktype);
3331 nfsm_chain_add_32(error, &nmreq, reclaim);
3332 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3333 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3334 nfsm_chain_add_32(error, &nmreq, newlocker);
3335 if (newlocker) {
3336 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3337 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3338 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3339 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3340 } else {
3341 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3342 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3343 }
3344 nfsm_chain_build_done(error, &nmreq);
3345 nfsm_assert(error, (numops == 0), EPROTO);
3346 nfsmout_if(error);
3347
3348 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3349
3350 if ((lockerror = nfs_node_lock(np)))
3351 error = lockerror;
3352 nfsm_chain_skip_tag(error, &nmrep);
3353 nfsm_chain_get_32(error, &nmrep, numops);
3354 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3355 nfsmout_if(error);
3356 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3357 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3358 nfsmout_if(error);
3359 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3360 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3361 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3362
3363 /* Update the lock owner's stategenid once it appears the server has state for it. */
3364 /* We determine this by noting the request was successful (we got a stateid). */
3365 if (newlocker && !error)
3366 nlop->nlo_stategenid = nmp->nm_stategenid;
3367 nfsmout:
3368 if (!lockerror)
3369 nfs_node_unlock(np);
3370 nfs_lock_owner_clear_busy(nlop);
3371 if (newlocker) {
3372 nfs_open_owner_clear_busy(nofp->nof_owner);
3373 nfs_open_file_clear_busy(nofp);
3374 }
3375 nfsm_chain_cleanup(&nmreq);
3376 nfsm_chain_cleanup(&nmrep);
3377 return (error);
3378 }
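/*
 * For reference, the LOCK4args layout marshaled above (per RFC 3530;
 * field names are the RFC's, not local identifiers):
 *
 *	locktype   READ/WRITE, or READW/WRITEW for blocking waits
 *	reclaim    bool
 *	offset     nfl_start
 *	length     NFS_LOCK_LENGTH(nfl_start, nfl_end)
 *	locker     new_lock_owner == TRUE:
 *	               open_seqid, open_stateid, lock_seqid, lock_owner
 *	           new_lock_owner == FALSE:
 *	               lock_stateid, lock_seqid
 *
 * which is why the newlocker branch sends four fields instead of two:
 * the first time a lock owner locks the file, the server creates lock
 * state under the existing open state.
 */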
3379
3380 /*
3381 * Send an NFSv4 LOCKU RPC to the server.
3382 */
3383 int
3384 nfs4_unlock_rpc(
3385 nfsnode_t np,
3386 struct nfs_lock_owner *nlop,
3387 int type,
3388 uint64_t start,
3389 uint64_t end,
3390 int flags,
3391 thread_t thd,
3392 kauth_cred_t cred)
3393 {
3394 struct nfsmount *nmp;
3395 struct nfsm_chain nmreq, nmrep;
3396 uint64_t xid;
3397 int error = 0, lockerror = ENOENT, numops, status;
3398 struct nfsreq_secinfo_args si;
3399
3400 nmp = NFSTONMP(np);
3401 if (nfs_mount_gone(nmp))
3402 return (ENXIO);
3403 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3404 return (EINVAL);
3405
3406 error = nfs_lock_owner_set_busy(nlop, NULL);
3407 if (error)
3408 return (error);
3409
3410 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3411 nfsm_chain_null(&nmreq);
3412 nfsm_chain_null(&nmrep);
3413
3414 // PUTFH, GETATTR, LOCKU
3415 numops = 3;
3416 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3417 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3418 numops--;
3419 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3420 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3421 numops--;
3422 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3423 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3424 numops--;
3425 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3426 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3427 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3428 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3429 nfsm_chain_add_64(error, &nmreq, start);
3430 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3431 nfsm_chain_build_done(error, &nmreq);
3432 nfsm_assert(error, (numops == 0), EPROTO);
3433 nfsmout_if(error);
3434
3435 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3436
3437 if ((lockerror = nfs_node_lock(np)))
3438 error = lockerror;
3439 nfsm_chain_skip_tag(error, &nmrep);
3440 nfsm_chain_get_32(error, &nmrep, numops);
3441 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3442 nfsmout_if(error);
3443 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3444 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3445 nfsmout_if(error);
3446 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3447 nfs_owner_seqid_increment(NULL, nlop, error);
3448 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3449 nfsmout:
3450 if (!lockerror)
3451 nfs_node_unlock(np);
3452 nfs_lock_owner_clear_busy(nlop);
3453 nfsm_chain_cleanup(&nmreq);
3454 nfsm_chain_cleanup(&nmrep);
3455 return (error);
3456 }
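/*
 * A note on ranges (a sketch of the expected encoding, per RFC 3530):
 * the protocol speaks (offset, length) while the client tracks
 * (start, end).  A length of all ones means the lock extends through
 * end-of-file, which is how an internal end of UINT64_MAX should be
 * encoded by NFS_LOCK_LENGTH.  For example:
 *
 *	start=100, end=199       ->  offset=100, length=100
 *	start=0, end=UINT64_MAX  ->  offset=0, length=all ones (EOF)
 */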
3457
3458 /*
3459 * Send an NFSv4 LOCKT RPC to the server.
3460 */
3461 int
3462 nfs4_getlock_rpc(
3463 nfsnode_t np,
3464 struct nfs_lock_owner *nlop,
3465 struct flock *fl,
3466 uint64_t start,
3467 uint64_t end,
3468 vfs_context_t ctx)
3469 {
3470 struct nfsmount *nmp;
3471 struct nfsm_chain nmreq, nmrep;
3472 uint64_t xid, val64 = 0;
3473 uint32_t val = 0;
3474 int error = 0, lockerror, numops, status;
3475 struct nfsreq_secinfo_args si;
3476
3477 nmp = NFSTONMP(np);
3478 if (nfs_mount_gone(nmp))
3479 return (ENXIO);
3480 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3481 return (EINVAL);
3482
3483 lockerror = ENOENT;
3484 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3485 nfsm_chain_null(&nmreq);
3486 nfsm_chain_null(&nmrep);
3487
3488 // PUTFH, GETATTR, LOCKT
3489 numops = 3;
3490 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3491 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3492 numops--;
3493 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3494 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3495 numops--;
3496 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3497 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3498 numops--;
3499 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3500 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3501 nfsm_chain_add_64(error, &nmreq, start);
3502 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3503 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3504 nfsm_chain_build_done(error, &nmreq);
3505 nfsm_assert(error, (numops == 0), EPROTO);
3506 nfsmout_if(error);
3507
3508 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3509
3510 if ((lockerror = nfs_node_lock(np)))
3511 error = lockerror;
3512 nfsm_chain_skip_tag(error, &nmrep);
3513 nfsm_chain_get_32(error, &nmrep, numops);
3514 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3515 nfsmout_if(error);
3516 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3517 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3518 nfsmout_if(error);
3519 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3520 if (error == NFSERR_DENIED) {
3521 error = 0;
3522 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3523 nfsm_chain_get_64(error, &nmrep, val64);
3524 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3525 nfsm_chain_get_32(error, &nmrep, val);
3526 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3527 fl->l_pid = 0;
3528 fl->l_whence = SEEK_SET;
3529 } else if (!error) {
3530 fl->l_type = F_UNLCK;
3531 }
3532 nfsmout:
3533 if (!lockerror)
3534 nfs_node_unlock(np);
3535 nfsm_chain_cleanup(&nmreq);
3536 nfsm_chain_cleanup(&nmrep);
3537 return (error);
3538 }
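/*
 * This LOCKT path is what backs a userspace F_GETLK on NFSv4: on
 * NFSERR_DENIED the conflicting range and type are copied into the
 * caller's struct flock, otherwise l_type is set to F_UNLCK.  A
 * minimal, hypothetical caller:
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *	    .l_len = 0, .l_type = F_WRLCK };
 *	if (!fcntl(fd, F_GETLK, &fl) && (fl.l_type != F_UNLCK))
 *		printf("conflicting %s lock at %lld\n",
 *		    (fl.l_type == F_WRLCK) ? "write" : "read",
 *		    (long long)fl.l_start);
 *
 * l_pid is zeroed above because the conflicting owner may live on
 * another client, where a local pid would be meaningless.
 */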
3539
3540
3541 /*
3542 * Check for any conflicts with the given lock.
3543 *
3544 * Checking for a lock doesn't require the file to be opened.
3545 * So we skip all the open-owner, open-file, and lock-owner work
3546 * and just check for a conflicting lock.
3547 */
3548 int
3549 nfs_advlock_getlock(
3550 nfsnode_t np,
3551 struct nfs_lock_owner *nlop,
3552 struct flock *fl,
3553 uint64_t start,
3554 uint64_t end,
3555 vfs_context_t ctx)
3556 {
3557 struct nfsmount *nmp;
3558 struct nfs_file_lock *nflp;
3559 int error = 0, answered = 0;
3560
3561 nmp = NFSTONMP(np);
3562 if (nfs_mount_gone(nmp))
3563 return (ENXIO);
3564
3565 restart:
3566 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx))))
3567 return (error);
3568
3569 lck_mtx_lock(&np->n_openlock);
3570 /* scan currently held locks for conflict */
3571 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3572 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3573 continue;
3574 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3575 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
3576 break;
3577 }
3578 if (nflp) {
3579 /* found a conflicting lock */
3580 fl->l_type = nflp->nfl_type;
3581 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3582 fl->l_start = nflp->nfl_start;
3583 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3584 fl->l_whence = SEEK_SET;
3585 answered = 1;
3586 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3587 /*
3588 * If we have a write delegation, we know there can't be other
3589 * locks on the server. So the answer is no conflicting lock found.
3590 */
3591 fl->l_type = F_UNLCK;
3592 answered = 1;
3593 }
3594 lck_mtx_unlock(&np->n_openlock);
3595 if (answered) {
3596 nfs_mount_state_in_use_end(nmp, 0);
3597 return (0);
3598 }
3599
3600 /* no conflict found locally, so ask the server */
3601 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3602
3603 if (nfs_mount_state_in_use_end(nmp, error))
3604 goto restart;
3605 return (error);
3606 }
3607
3608 /*
3609 * Acquire a file lock for the given range.
3610 *
3611 * Add the lock (request) to the lock queue.
3612 * Scan the lock queue for any conflicting locks.
3613 * If a conflict is found, block or return an error.
3614 * Once the end of the queue is reached, send the request to the server.
3615 * If the server grants the lock, scan the lock queue and
3616 * update any existing locks. Then (optionally) scan the
3617 * queue again to coalesce any locks adjacent to the new one.
3618 */
3619 int
3620 nfs_advlock_setlock(
3621 nfsnode_t np,
3622 struct nfs_open_file *nofp,
3623 struct nfs_lock_owner *nlop,
3624 int op,
3625 uint64_t start,
3626 uint64_t end,
3627 int style,
3628 short type,
3629 vfs_context_t ctx)
3630 {
3631 struct nfsmount *nmp;
3632 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3633 struct nfs_file_lock *coalnflp;
3634 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3635 struct timespec ts = {1, 0};
3636
3637 nmp = NFSTONMP(np);
3638 if (nfs_mount_gone(nmp))
3639 return (ENXIO);
3640 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3641
3642 if ((type != F_RDLCK) && (type != F_WRLCK))
3643 return (EINVAL);
3644
3645 /* allocate a new lock */
3646 newnflp = nfs_file_lock_alloc(nlop);
3647 if (!newnflp)
3648 return (ENOLCK);
3649 newnflp->nfl_start = start;
3650 newnflp->nfl_end = end;
3651 newnflp->nfl_type = type;
3652 if (op == F_SETLKW)
3653 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3654 newnflp->nfl_flags |= style;
3655 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
3656
3657 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3658 /*
3659 * For exclusive flock-style locks, if we block waiting for the
3660 * lock, we need to first release any currently held shared
3661 * flock-style lock. So, the first thing we do is check if we
3662 * have a shared flock-style lock.
3663 */
3664 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3665 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
3666 nflp = NULL;
3667 if (nflp && (nflp->nfl_type != F_RDLCK))
3668 nflp = NULL;
3669 flocknflp = nflp;
3670 }
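/*
 * Example of the case just checked for (illustrative): a process
 * holding flock(fd, LOCK_SH) that calls flock(fd, LOCK_EX) is doing a
 * non-atomic upgrade; the shared whole-file lock must be dropped
 * before blocking on the exclusive one, otherwise two processes
 * upgrading at the same time would deadlock, each waiting for the
 * other's shared lock to go away.
 */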
3671
3672 restart:
3673 restart = 0;
3674 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3675 if (error)
3676 goto error_out;
3677 inuse = 1;
3678 if (np->n_flag & NREVOKE) {
3679 error = EIO;
3680 nfs_mount_state_in_use_end(nmp, 0);
3681 inuse = 0;
3682 goto error_out;
3683 }
3684 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3685 nfs_mount_state_in_use_end(nmp, 0);
3686 inuse = 0;
3687 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3688 if (error)
3689 goto error_out;
3690 goto restart;
3691 }
3692
3693 lck_mtx_lock(&np->n_openlock);
3694 if (!inqueue) {
3695 /* insert new lock at beginning of list */
3696 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3697 inqueue = 1;
3698 }
3699
3700 /* scan current list of locks (held and pending) for conflicts */
3701 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3702 nextnflp = TAILQ_NEXT(nflp, nfl_link);
3703 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
3704 continue;
3705 /* Conflict */
3706 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3707 error = EAGAIN;
3708 break;
3709 }
3710 /* Block until this lock is no longer held. */
3711 if (nflp->nfl_blockcnt == UINT_MAX) {
3712 error = ENOLCK;
3713 break;
3714 }
3715 nflp->nfl_blockcnt++;
3716 do {
3717 if (flocknflp) {
3718 /* release any currently held shared lock before sleeping */
3719 lck_mtx_unlock(&np->n_openlock);
3720 nfs_mount_state_in_use_end(nmp, 0);
3721 inuse = 0;
3722 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3723 flocknflp = NULL;
3724 if (!error)
3725 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3726 if (error) {
3727 lck_mtx_lock(&np->n_openlock);
3728 break;
3729 }
3730 inuse = 1;
3731 lck_mtx_lock(&np->n_openlock);
3732 /* no need to block/sleep if the conflict is gone */
3733 if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
3734 break;
3735 }
3736 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
3737 slpflag = 0;
3738 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3739 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3740 /* looks like we have a recover pending... restart */
3741 restart = 1;
3742 lck_mtx_unlock(&np->n_openlock);
3743 nfs_mount_state_in_use_end(nmp, 0);
3744 inuse = 0;
3745 lck_mtx_lock(&np->n_openlock);
3746 break;
3747 }
3748 if (!error && (np->n_flag & NREVOKE))
3749 error = EIO;
3750 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
3751 nflp->nfl_blockcnt--;
3752 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
3753 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3754 nfs_file_lock_destroy(nflp);
3755 }
3756 if (error || restart)
3757 break;
3758 /* We have released n_openlock, so we can't trust that nextnflp is still valid. */
3759 /* So, restart this lock-scanning loop from where it began (just after our new lock). */
3760 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
3761 }
3762 lck_mtx_unlock(&np->n_openlock);
3763 if (restart)
3764 goto restart;
3765 if (error)
3766 goto error_out;
3767
3768 if (willsplit) {
3769 /*
3770 * It looks like this operation is splitting a lock.
3771 * We allocate a new lock now so we don't have to worry
3772 * about the allocation failing after we've updated some state.
3773 */
3774 nflp2 = nfs_file_lock_alloc(nlop);
3775 if (!nflp2) {
3776 error = ENOLCK;
3777 goto error_out;
3778 }
3779 }
3780
3781 /* once the scan for local conflicts comes up clean, send the request to the server */
3782 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
3783 goto error_out;
3784 busy = 1;
3785 delay = 0;
3786 do {
3787 /* do we have a delegation? (that we're not returning?) */
3788 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
3789 if (np->n_openflags & N_DELEG_WRITE) {
3790 /* with a write delegation, just take the lock delegated */
3791 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
3792 error = 0;
3793 /* make sure the lock owner knows its open owner */
3794 if (!nlop->nlo_open_owner) {
3795 nfs_open_owner_ref(nofp->nof_owner);
3796 nlop->nlo_open_owner = nofp->nof_owner;
3797 }
3798 break;
3799 } else {
3800 /*
3801 * If we don't have any non-delegated opens but we do have
3802 * delegated opens, then we need to first claim the delegated
3803 * opens so that the lock request on the server can be associated
3804 * with an open it knows about.
3805 */
3806 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
3807 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
3808 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
3809 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
3810 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
3811 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
3812 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
3813 if (error)
3814 break;
3815 }
3816 }
3817 }
3818 if (np->n_flag & NREVOKE)
3819 error = EIO;
3820 if (!error)
3821 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
3822 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
3823 break;
3824 /* request was denied due to either conflict or grace period */
3825 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3826 error = EAGAIN;
3827 break;
3828 }
3829 if (flocknflp) {
3830 /* release any currently held shared lock before sleeping */
3831 nfs_open_state_clear_busy(np);
3832 busy = 0;
3833 nfs_mount_state_in_use_end(nmp, 0);
3834 inuse = 0;
3835 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3836 flocknflp = NULL;
3837 if (!error2)
3838 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3839 if (!error2) {
3840 inuse = 1;
3841 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
3842 }
3843 if (error2) {
3844 error = error2;
3845 break;
3846 }
3847 busy = 1;
3848 }
3849 /*
3850 * Wait a little bit and send the request again.
3851 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
3852 */
3853 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
3854 if (error == NFSERR_GRACE)
3855 delay = 4;
3856 if (delay < 4)
3857 delay++;
3858 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2));
3859 slpflag = 0;
3860 }
3861 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3862 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3863 /* looks like we have a recover pending... restart */
3864 nfs_open_state_clear_busy(np);
3865 busy = 0;
3866 nfs_mount_state_in_use_end(nmp, 0);
3867 inuse = 0;
3868 goto restart;
3869 }
3870 if (!error && (np->n_flag & NREVOKE))
3871 error = EIO;
3872 } while (!error);
3873
3874 error_out:
3875 if (nfs_mount_state_error_should_restart(error)) {
3876 /* looks like we need to restart this operation */
3877 if (busy) {
3878 nfs_open_state_clear_busy(np);
3879 busy = 0;
3880 }
3881 if (inuse) {
3882 nfs_mount_state_in_use_end(nmp, error);
3883 inuse = 0;
3884 }
3885 goto restart;
3886 }
3887 lck_mtx_lock(&np->n_openlock);
3888 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
3889 if (error) {
3890 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3891 if (newnflp->nfl_blockcnt) {
3892 /* wake up anyone blocked on this lock */
3893 wakeup(newnflp);
3894 } else {
3895 /* remove newnflp from lock list and destroy */
3896 if (inqueue)
3897 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
3898 nfs_file_lock_destroy(newnflp);
3899 }
3900 lck_mtx_unlock(&np->n_openlock);
3901 if (busy)
3902 nfs_open_state_clear_busy(np);
3903 if (inuse)
3904 nfs_mount_state_in_use_end(nmp, error);
3905 if (nflp2)
3906 nfs_file_lock_destroy(nflp2);
3907 return (error);
3908 }
3909
3910 /* server granted the lock */
3911
3912 /*
3913 * Scan for locks to update.
3914 *
3915 * Locks completely covered are killed.
3916 * At most two locks may need to be clipped.
3917 * It's possible that a single lock may need to be split.
3918 */
3919 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3920 if (nflp == newnflp)
3921 continue;
3922 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3923 continue;
3924 if (nflp->nfl_owner != nlop)
3925 continue;
3926 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
3927 continue;
3928 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
3929 continue;
3930 /* here's one to update */
3931 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
3932 /* The entire lock is being replaced. */
3933 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3934 lck_mtx_lock(&nlop->nlo_lock);
3935 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3936 lck_mtx_unlock(&nlop->nlo_lock);
3937 /* lock will be destroyed below, if no waiters */
3938 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
3939 /* We're replacing a range in the middle of a lock. */
3940 /* The current lock will be split into two locks. */
3941 /* Update locks and insert new lock after current lock. */
3942 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
3943 nflp2->nfl_type = nflp->nfl_type;
3944 nflp2->nfl_start = newnflp->nfl_end + 1;
3945 nflp2->nfl_end = nflp->nfl_end;
3946 nflp->nfl_end = newnflp->nfl_start - 1;
3947 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
3948 nfs_lock_owner_insert_held_lock(nlop, nflp2);
3949 nextnflp = nflp2;
3950 nflp2 = NULL;
3951 } else if (newnflp->nfl_start > nflp->nfl_start) {
3952 /* We're replacing the end of a lock. */
3953 nflp->nfl_end = newnflp->nfl_start - 1;
3954 } else if (newnflp->nfl_end < nflp->nfl_end) {
3955 /* We're replacing the start of a lock. */
3956 nflp->nfl_start = newnflp->nfl_end + 1;
3957 }
3958 if (nflp->nfl_blockcnt) {
3959 /* wake up anyone blocked on this lock */
3960 wakeup(nflp);
3961 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
3962 /* remove nflp from lock list and destroy */
3963 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3964 nfs_file_lock_destroy(nflp);
3965 }
3966 }
3967
3968 nfs_lock_owner_insert_held_lock(nlop, newnflp);
3969
3970 /*
3971 * POSIX locks should be coalesced when possible.
3972 */
3973 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
3974 /*
3975 * Walk through the lock queue and check each of our held locks with
3976 * the previous and next locks in the lock owner's "held lock list".
3977 * If the two locks can be coalesced, we merge the current lock into
3978 * the other (previous or next) lock. Merging this way makes sure that
3979 * lock ranges are always merged forward in the lock queue. This is
3980 * important because anyone blocked on the lock being "merged away"
3981 * will still need to block on that range and it will simply continue
3982 * checking locks that are further down the list.
3983 */
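/*
 * For example (sketch): if this owner holds POSIX locks [0,49] and
 * [50,99] of the same type, the merge extends the earlier lock to
 * [0,99] and marks the later one dead; anyone blocked on the dying
 * [50,99] entry is woken and continues scanning down the queue,
 * where the surviving [0,99] lock still covers the range.
 */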
3984 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3985 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3986 continue;
3987 if (nflp->nfl_owner != nlop)
3988 continue;
3989 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
3990 continue;
3991 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
3992 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3993 (coalnflp->nfl_type == nflp->nfl_type) &&
3994 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
3995 coalnflp->nfl_end = nflp->nfl_end;
3996 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3997 lck_mtx_lock(&nlop->nlo_lock);
3998 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3999 lck_mtx_unlock(&nlop->nlo_lock);
4000 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4001 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4002 (coalnflp->nfl_type == nflp->nfl_type) &&
4003 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4004 coalnflp->nfl_start = nflp->nfl_start;
4005 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4006 lck_mtx_lock(&nlop->nlo_lock);
4007 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4008 lck_mtx_unlock(&nlop->nlo_lock);
4009 }
4010 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
4011 continue;
4012 if (nflp->nfl_blockcnt) {
4013 /* wake up anyone blocked on this lock */
4014 wakeup(nflp);
4015 } else {
4016 /* remove nflp from lock list and destroy */
4017 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4018 nfs_file_lock_destroy(nflp);
4019 }
4020 }
4021 }
4022
4023 lck_mtx_unlock(&np->n_openlock);
4024 nfs_open_state_clear_busy(np);
4025 nfs_mount_state_in_use_end(nmp, error);
4026
4027 if (nflp2)
4028 nfs_file_lock_destroy(nflp2);
4029 return (error);
4030 }
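/*
 * The splitting arithmetic above, as a concrete example (sketch):
 * with an existing lock [0,99] and a new lock [40,59], the
 * middle-replacement case produces three locks:
 *
 *	nflp:    [0,39]     (nfl_end   = newnflp->nfl_start - 1)
 *	newnflp: [40,59]    (the lock just granted)
 *	nflp2:   [60,99]    (nfl_start = newnflp->nfl_end + 1)
 *
 * which is why nflp2 is preallocated before any state is touched:
 * an allocation failure mid-update could not be unwound.
 */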
4031
4032 /*
4033 * Release all (same style) locks within the given range.
4034 */
4035 int
4036 nfs_advlock_unlock(
4037 nfsnode_t np,
4038 struct nfs_open_file *nofp,
4039 struct nfs_lock_owner *nlop,
4040 uint64_t start,
4041 uint64_t end,
4042 int style,
4043 vfs_context_t ctx)
4044 {
4045 struct nfsmount *nmp;
4046 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4047 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4048
4049 nmp = NFSTONMP(np);
4050 if (nfs_mount_gone(nmp))
4051 return (ENXIO);
4052
4053 restart:
4054 if ((error = nfs_mount_state_in_use_start(nmp, NULL)))
4055 return (error);
4056 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4057 nfs_mount_state_in_use_end(nmp, 0);
4058 error = nfs4_reopen(nofp, NULL);
4059 if (error)
4060 return (error);
4061 goto restart;
4062 }
4063 if ((error = nfs_open_state_set_busy(np, NULL))) {
4064 nfs_mount_state_in_use_end(nmp, error);
4065 return (error);
4066 }
4067
4068 lck_mtx_lock(&np->n_openlock);
4069 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4070 /*
4071 * We may need to allocate a new lock if an existing lock gets split.
4072 * So, we first scan the list to check for a split, and if there's
4073 * going to be one, we'll allocate one now.
4074 */
4075 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4076 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4077 continue;
4078 if (nflp->nfl_owner != nlop)
4079 continue;
4080 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4081 continue;
4082 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4083 continue;
4084 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4085 willsplit = 1;
4086 break;
4087 }
4088 }
4089 if (willsplit) {
4090 lck_mtx_unlock(&np->n_openlock);
4091 nfs_open_state_clear_busy(np);
4092 nfs_mount_state_in_use_end(nmp, 0);
4093 newnflp = nfs_file_lock_alloc(nlop);
4094 if (!newnflp)
4095 return (ENOMEM);
4096 goto restart;
4097 }
4098 }
4099
4100 /*
4101 * Free all of our locks in the given range.
4102 *
4103 * Note that this process requires sending requests to the server.
4104 * Because of this, we will release the n_openlock while performing
4105 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4106 * locks from changing underneath us. However, other entries in the
4107 * list may be removed. So we need to be careful walking the list.
4108 */
4109
4110 /*
4111 * Don't unlock ranges that are held by other-style locks.
4112 * If the style is POSIX, don't send any unlock RPCs while a flock-style lock is held.
4113 * If we unlock a flock-style lock, don't send unlock RPCs for the POSIX-style
4114 * ranges still held - instead, send unlocks for the ranges not held.
4115 */
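/*
 * Concrete example of the flock-over-POSIX case handled below
 * (sketch): if this owner still holds POSIX ranges [10,19] and
 * [30,39], unlocking the whole-file flock sends unlock RPCs only for
 * the gaps [0,9], [20,29], and [40,UINT64_MAX], so the held POSIX
 * ranges stay locked on the server.
 */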
4116 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4117 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4118 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
4119 send_unlock_rpcs = 0;
4120 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4121 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4122 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4123 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4124 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4125 uint64_t s = 0;
4126 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4127 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4128 while (!delegated && nflp) {
4129 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4130 /* unlock the range preceding this lock */
4131 lck_mtx_unlock(&np->n_openlock);
4132 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0,
4133 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4134 if (nfs_mount_state_error_should_restart(error)) {
4135 nfs_open_state_clear_busy(np);
4136 nfs_mount_state_in_use_end(nmp, error);
4137 goto restart;
4138 }
4139 lck_mtx_lock(&np->n_openlock);
4140 if (error)
4141 goto out;
4142 s = nflp->nfl_end+1;
4143 }
4144 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4145 }
4146 if (!delegated) {
4147 lck_mtx_unlock(&np->n_openlock);
4148 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4149 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4150 if (nfs_mount_state_error_should_restart(error)) {
4151 nfs_open_state_clear_busy(np);
4152 nfs_mount_state_in_use_end(nmp, error);
4153 goto restart;
4154 }
4155 lck_mtx_lock(&np->n_openlock);
4156 if (error)
4157 goto out;
4158 }
4159 send_unlock_rpcs = 0;
4160 }
4161
4162 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4163 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4164 continue;
4165 if (nflp->nfl_owner != nlop)
4166 continue;
4167 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4168 continue;
4169 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4170 continue;
4171 /* here's one to unlock */
4172 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4173 /* The entire lock is being unlocked. */
4174 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4175 lck_mtx_unlock(&np->n_openlock);
4176 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4177 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4178 if (nfs_mount_state_error_should_restart(error)) {
4179 nfs_open_state_clear_busy(np);
4180 nfs_mount_state_in_use_end(nmp, error);
4181 goto restart;
4182 }
4183 lck_mtx_lock(&np->n_openlock);
4184 }
4185 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4186 if (error)
4187 break;
4188 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4189 lck_mtx_lock(&nlop->nlo_lock);
4190 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4191 lck_mtx_unlock(&nlop->nlo_lock);
4192 /* lock will be destroyed below, if no waiters */
4193 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4194 /* We're unlocking a range in the middle of a lock. */
4195 /* The current lock will be split into two locks. */
4196 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4197 lck_mtx_unlock(&np->n_openlock);
4198 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4199 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4200 if (nfs_mount_state_error_should_restart(error)) {
4201 nfs_open_state_clear_busy(np);
4202 nfs_mount_state_in_use_end(nmp, error);
4203 goto restart;
4204 }
4205 lck_mtx_lock(&np->n_openlock);
4206 }
4207 if (error)
4208 break;
4209 /* update locks and insert new lock after current lock */
4210 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
4211 newnflp->nfl_type = nflp->nfl_type;
4212 newnflp->nfl_start = end + 1;
4213 newnflp->nfl_end = nflp->nfl_end;
4214 nflp->nfl_end = start - 1;
4215 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4216 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4217 nextnflp = newnflp;
4218 newnflp = NULL;
4219 } else if (start > nflp->nfl_start) {
4220 /* We're unlocking the end of a lock. */
4221 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4222 lck_mtx_unlock(&np->n_openlock);
4223 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4224 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4225 if (nfs_mount_state_error_should_restart(error)) {
4226 nfs_open_state_clear_busy(np);
4227 nfs_mount_state_in_use_end(nmp, error);
4228 goto restart;
4229 }
4230 lck_mtx_lock(&np->n_openlock);
4231 }
4232 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4233 if (error)
4234 break;
4235 nflp->nfl_end = start - 1;
4236 } else if (end < nflp->nfl_end) {
4237 /* We're unlocking the start of a lock. */
4238 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4239 lck_mtx_unlock(&np->n_openlock);
4240 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4241 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4242 if (nfs_mount_state_error_should_restart(error)) {
4243 nfs_open_state_clear_busy(np);
4244 nfs_mount_state_in_use_end(nmp, error);
4245 goto restart;
4246 }
4247 lck_mtx_lock(&np->n_openlock);
4248 }
4249 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4250 if (error)
4251 break;
4252 nflp->nfl_start = end + 1;
4253 }
4254 if (nflp->nfl_blockcnt) {
4255 /* wake up anyone blocked on this lock */
4256 wakeup(nflp);
4257 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4258 /* remove nflp from lock list and destroy */
4259 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4260 nfs_file_lock_destroy(nflp);
4261 }
4262 }
4263 out:
4264 lck_mtx_unlock(&np->n_openlock);
4265 nfs_open_state_clear_busy(np);
4266 nfs_mount_state_in_use_end(nmp, 0);
4267
4268 if (newnflp)
4269 nfs_file_lock_destroy(newnflp);
4270 return (error);
4271 }
4272
4273 /*
4274 * NFS advisory file locking (this vnop serves all NFS versions; the version-specific lock RPCs are dispatched via nm_funcs)
4275 */
4276 int
4277 nfs_vnop_advlock(
4278 struct vnop_advlock_args /* {
4279 struct vnodeop_desc *a_desc;
4280 vnode_t a_vp;
4281 caddr_t a_id;
4282 int a_op;
4283 struct flock *a_fl;
4284 int a_flags;
4285 vfs_context_t a_context;
4286 } */ *ap)
4287 {
4288 vnode_t vp = ap->a_vp;
4289 nfsnode_t np = VTONFS(ap->a_vp);
4290 struct flock *fl = ap->a_fl;
4291 int op = ap->a_op;
4292 int flags = ap->a_flags;
4293 vfs_context_t ctx = ap->a_context;
4294 struct nfsmount *nmp;
4295 struct nfs_open_owner *noop = NULL;
4296 struct nfs_open_file *nofp = NULL;
4297 struct nfs_lock_owner *nlop = NULL;
4298 off_t lstart;
4299 uint64_t start, end;
4300 int error = 0, modified, style;
4301 enum vtype vtype;
4302 #define OFF_MAX QUAD_MAX
4303
4304 nmp = VTONMP(ap->a_vp);
4305 if (nfs_mount_gone(nmp))
4306 return (ENXIO);
4307 lck_mtx_lock(&nmp->nm_lock);
4308 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4309 lck_mtx_unlock(&nmp->nm_lock);
4310 return (ENOTSUP);
4311 }
4312 lck_mtx_unlock(&nmp->nm_lock);
4313
4314 if (np->n_flag & NREVOKE)
4315 return (EIO);
4316 vtype = vnode_vtype(ap->a_vp);
4317 if (vtype == VDIR) /* ignore lock requests on directories */
4318 return (0);
4319 if (vtype != VREG) /* anything other than regular files is invalid */
4320 return (EINVAL);
4321
4322 /* Convert the flock structure into a start and end. */
4323 switch (fl->l_whence) {
4324 case SEEK_SET:
4325 case SEEK_CUR:
4326 /*
4327 * Caller is responsible for adding any necessary offset
4328 * to fl->l_start when SEEK_CUR is used.
4329 */
4330 lstart = fl->l_start;
4331 break;
4332 case SEEK_END:
4333 /* Need to flush and refetch attributes to make sure */
4334 /* we have the correct end-of-file offset. */
4335 if ((error = nfs_node_lock(np)))
4336 return (error);
4337 modified = (np->n_flag & NMODIFIED);
4338 nfs_node_unlock(np);
4339 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
4340 return (error);
4341 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED)))
4342 return (error);
4343 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4344 if ((np->n_size > OFF_MAX) ||
4345 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
4346 error = EOVERFLOW;
4347 lstart = np->n_size + fl->l_start;
4348 nfs_data_unlock(np);
4349 if (error)
4350 return (error);
4351 break;
4352 default:
4353 return (EINVAL);
4354 }
4355 if (lstart < 0)
4356 return (EINVAL);
4357 start = lstart;
4358 if (fl->l_len == 0) {
4359 end = UINT64_MAX;
4360 } else if (fl->l_len > 0) {
4361 if ((fl->l_len - 1) > (OFF_MAX - lstart))
4362 return (EOVERFLOW);
4363 end = start - 1 + fl->l_len;
4364 } else { /* l_len is negative */
4365 if ((lstart + fl->l_len) < 0)
4366 return (EINVAL);
4367 end = start - 1;
4368 start += fl->l_len;
4369 }
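/*
 * Conversion examples (sketch): l_start=100, l_len=50 maps to
 * [100,149]; l_len=0 maps to [l_start,UINT64_MAX] (lock through
 * EOF); a negative length such as l_start=100, l_len=-50 locks the
 * 50 bytes before l_start, i.e. [50,99].
 */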
4370 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX))))
4371 return (EINVAL);
4372
4373 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4374 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
4375 return (EINVAL);
4376
4377 /* find the lock owner, allocating one unless this is an unlock request */
4378 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4379 if (!nlop) {
4380 error = (op == F_UNLCK) ? 0 : ENOMEM;
4381 if (error)
4382 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4383 goto out;
4384 }
4385
4386 if (op == F_GETLK) {
4387 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4388 } else {
4389 /* find the open owner */
4390 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4391 if (!noop) {
4392 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4393 error = EPERM;
4394 goto out;
4395 }
4396 /* find the open file */
4397 restart:
4398 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4399 if (error)
4400 error = EBADF;
4401 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4402 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4403 error = EIO;
4404 }
4405 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4406 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4407 nofp = NULL;
4408 if (!error)
4409 goto restart;
4410 }
4411 if (error) {
4412 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4413 goto out;
4414 }
4415 if (op == F_UNLCK) {
4416 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4417 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4418 if ((op == F_SETLK) && (flags & F_WAIT))
4419 op = F_SETLKW;
4420 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4421 } else {
4422 /* not getlk, unlock or lock? */
4423 error = EINVAL;
4424 }
4425 }
4426
4427 out:
4428 if (nlop)
4429 nfs_lock_owner_rele(nlop);
4430 if (noop)
4431 nfs_open_owner_rele(noop);
4432 return (error);
4433 }
4434
4435 /*
4436 * Check if an open owner holds any locks on a file.
4437 */
4438 int
4439 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4440 {
4441 struct nfs_lock_owner *nlop;
4442
4443 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4444 if (nlop->nlo_open_owner != noop)
4445 continue;
4446 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4447 break;
4448 }
4449 return (nlop ? 1 : 0);
4450 }
4451
4452 /*
4453 * Reopen simple (no deny, no locks) open state that was lost.
4454 */
4455 int
4456 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4457 {
4458 struct nfs_open_owner *noop = nofp->nof_owner;
4459 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4460 nfsnode_t np = nofp->nof_np;
4461 vnode_t vp = NFSTOV(np);
4462 vnode_t dvp = NULL;
4463 struct componentname cn;
4464 const char *vname = NULL;
4465 const char *name = NULL;
4466 size_t namelen;
4467 char smallname[128];
4468 char *filename = NULL;
4469 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4470 struct timespec ts = { 1, 0 };
4471
4472 lck_mtx_lock(&nofp->nof_lock);
4473 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4474 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
4475 break;
4476 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
4477 slpflag = 0;
4478 }
4479 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4480 lck_mtx_unlock(&nofp->nof_lock);
4481 return (error);
4482 }
4483 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4484 lck_mtx_unlock(&nofp->nof_lock);
4485
4486 nfs_node_lock_force(np);
4487 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4488 /*
4489 * The node's been sillyrenamed, so we need to use
4490 * the sillyrename directory/name to do the open.
4491 */
4492 struct nfs_sillyrename *nsp = np->n_sillyrename;
4493 dvp = NFSTOV(nsp->nsr_dnp);
4494 if ((error = vnode_get(dvp))) {
4495 nfs_node_unlock(np);
4496 goto out;
4497 }
4498 name = nsp->nsr_name;
4499 } else {
4500 /*
4501 * [sigh] We can't trust VFS to get the parent right for named
4502 * attribute nodes. (It likes to reparent the nodes after we've
4503 * created them.) Luckily we can probably get the right parent
4504 * from the n_parent we have stashed away.
4505 */
4506 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4507 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4508 dvp = NULL;
4509 if (!dvp)
4510 dvp = vnode_getparent(vp);
4511 vname = vnode_getname(vp);
4512 if (!dvp || !vname) {
4513 if (!error)
4514 error = EIO;
4515 nfs_node_unlock(np);
4516 goto out;
4517 }
4518 name = vname;
4519 }
4520 filename = &smallname[0];
4521 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4522 if (namelen >= sizeof(smallname)) {
4523 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
4524 if (!filename) {
4525 error = ENOMEM;
4526 nfs_node_unlock(np); goto out;	/* don't leak the node lock taken above */
4527 }
4528 snprintf(filename, namelen+1, "%s", name);
4529 }
4530 nfs_node_unlock(np);
4531 bzero(&cn, sizeof(cn));
4532 cn.cn_nameptr = filename;
4533 cn.cn_namelen = namelen;
4534
4535 restart:
4536 done = 0;
4537 if ((error = nfs_mount_state_in_use_start(nmp, thd)))
4538 goto out;
4539
4540 if (nofp->nof_rw)
4541 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4542 if (!error && nofp->nof_w)
4543 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4544 if (!error && nofp->nof_r)
4545 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4546
4547 if (nfs_mount_state_in_use_end(nmp, error)) {
4548 if (error == NFSERR_GRACE)
4549 goto restart;
4550 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4551 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4552 error = 0;
4553 goto out;
4554 }
4555 done = 1;
4556 out:
4557 if (error && (error != EINTR) && (error != ERESTART))
4558 nfs_revoke_open_state_for_node(np);
4559 lck_mtx_lock(&nofp->nof_lock);
4560 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4561 if (done)
4562 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4563 else if (error)
4564 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4565 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4566 lck_mtx_unlock(&nofp->nof_lock);
4567 if (filename && (filename != &smallname[0]))
4568 FREE(filename, M_TEMP);
4569 if (vname)
4570 vnode_putname(vname);
4571 if (dvp != NULLVP)
4572 vnode_put(dvp);
4573 return (error);
4574 }
4575
4576 /*
4577 * Send a normal OPEN RPC to open/create a file.
4578 */
4579 int
4580 nfs4_open_rpc(
4581 struct nfs_open_file *nofp,
4582 vfs_context_t ctx,
4583 struct componentname *cnp,
4584 struct vnode_attr *vap,
4585 vnode_t dvp,
4586 vnode_t *vpp,
4587 int create,
4588 int share_access,
4589 int share_deny)
4590 {
4591 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4592 cnp, vap, dvp, vpp, create, share_access, share_deny));
4593 }
4594
4595 /*
4596 * Send an OPEN RPC to reopen a file.
4597 */
4598 int
4599 nfs4_open_reopen_rpc(
4600 struct nfs_open_file *nofp,
4601 thread_t thd,
4602 kauth_cred_t cred,
4603 struct componentname *cnp,
4604 vnode_t dvp,
4605 vnode_t *vpp,
4606 int share_access,
4607 int share_deny)
4608 {
4609 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny));
4610 }
4611
4612 /*
4613 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4614 */
4615 int
4616 nfs4_open_confirm_rpc(
4617 struct nfsmount *nmp,
4618 nfsnode_t dnp,
4619 u_char *fhp,
4620 int fhlen,
4621 struct nfs_open_owner *noop,
4622 nfs_stateid *sid,
4623 thread_t thd,
4624 kauth_cred_t cred,
4625 struct nfs_vattr *nvap,
4626 uint64_t *xidp)
4627 {
4628 struct nfsm_chain nmreq, nmrep;
4629 int error = 0, status, numops;
4630 struct nfsreq_secinfo_args si;
4631
4632 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
4633 nfsm_chain_null(&nmreq);
4634 nfsm_chain_null(&nmrep);
4635
4636 // PUTFH, OPEN_CONFIRM, GETATTR
4637 numops = 3;
4638 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
4639 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
4640 numops--;
4641 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4642 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
4643 numops--;
4644 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
4645 nfsm_chain_add_stateid(error, &nmreq, sid);
4646 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4647 numops--;
4648 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4649 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4650 nfsm_chain_build_done(error, &nmreq);
4651 nfsm_assert(error, (numops == 0), EPROTO);
4652 nfsmout_if(error);
4653 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
4654
4655 nfsm_chain_skip_tag(error, &nmrep);
4656 nfsm_chain_get_32(error, &nmrep, numops);
4657 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4658 nfsmout_if(error);
4659 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
4660 nfs_owner_seqid_increment(noop, NULL, error);
4661 nfsm_chain_get_stateid(error, &nmrep, sid);
4662 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4663 nfsmout_if(error);
4664 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
4665 nfsmout:
4666 nfsm_chain_cleanup(&nmreq);
4667 nfsm_chain_cleanup(&nmrep);
4668 return (error);
4669 }
4670
4671 /*
4672 * common OPEN RPC code
4673 *
4674 * If create is set, ctx must be passed in.
4675 * Returns a node on success if no node passed in.
4676 */
4677 int
4678 nfs4_open_rpc_internal(
4679 struct nfs_open_file *nofp,
4680 vfs_context_t ctx,
4681 thread_t thd,
4682 kauth_cred_t cred,
4683 struct componentname *cnp,
4684 struct vnode_attr *vap,
4685 vnode_t dvp,
4686 vnode_t *vpp,
4687 int create,
4688 int share_access,
4689 int share_deny)
4690 {
4691 struct nfsmount *nmp;
4692 struct nfs_open_owner *noop = nofp->nof_owner;
4693 struct nfs_vattr nvattr;
4694 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
4695 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
4696 u_int64_t xid, savedxid = 0;
4697 nfsnode_t dnp = VTONFS(dvp);
4698 nfsnode_t np, newnp = NULL;
4699 vnode_t newvp = NULL;
4700 struct nfsm_chain nmreq, nmrep;
4701 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
4702 uint32_t rflags, delegation, recall;
4703 struct nfs_stateid stateid, dstateid, *sid;
4704 fhandle_t fh;
4705 struct nfsreq rq, *req = &rq;
4706 struct nfs_dulookup dul;
4707 char sbuf[64], *s;
4708 uint32_t ace_type, ace_flags, ace_mask, len, slen;
4709 struct kauth_ace ace;
4710 struct nfsreq_secinfo_args si;
4711
4712 if (create && !ctx)
4713 return (EINVAL);
4714
4715 nmp = VTONMP(dvp);
4716 if (nfs_mount_gone(nmp))
4717 return (ENXIO);
4718 nfsvers = nmp->nm_vers;
4719 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4720 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
4721 return (EINVAL);
4722
4723 np = *vpp ? VTONFS(*vpp) : NULL;
4724 if (create && vap) {
4725 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
4726 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4727 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4728 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4729 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)))
4730 vap->va_vaflags |= VA_UTIMES_NULL;
4731 } else {
4732 exclusive = gotuid = gotgid = 0;
4733 }
4734 if (nofp) {
4735 sid = &nofp->nof_stateid;
4736 } else {
4737 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
4738 sid = &stateid;
4739 }
4740
4741 if ((error = nfs_open_owner_set_busy(noop, thd)))
4742 return (error);
4743 again:
4744 rflags = delegation = recall = 0;
4745 ace.ace_flags = 0;
4746 s = sbuf;
4747 slen = sizeof(sbuf);
4748 NVATTR_INIT(&nvattr);
4749 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
4750
4751 nfsm_chain_null(&nmreq);
4752 nfsm_chain_null(&nmrep);
4753
4754 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4755 numops = 6;
4756 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
4757 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
4758 numops--;
4759 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4760 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4761 numops--;
4762 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
4763 numops--;
4764 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
4765 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4766 nfsm_chain_add_32(error, &nmreq, share_access);
4767 nfsm_chain_add_32(error, &nmreq, share_deny);
4768 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
4769 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
4770 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
4771 nfsm_chain_add_32(error, &nmreq, create);
4772 if (create) {
4773 if (exclusive) {
4774 static uint32_t create_verf; // XXX need a better verifier
4775 create_verf++;
4776 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4777 /* insert 64 bit verifier */
4778 nfsm_chain_add_32(error, &nmreq, create_verf);
4779 nfsm_chain_add_32(error, &nmreq, create_verf);
4780 } else {
4781 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4782 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
4783 }
4784 }
4785 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
4786 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4787 numops--;
4788 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4789 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
4790 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
4791 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
4792 numops--;
4793 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
4794 numops--;
4795 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4796 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4797 nfsm_chain_build_done(error, &nmreq);
4798 nfsm_assert(error, (numops == 0), EPROTO);
4799 if (!error)
4800 error = busyerror = nfs_node_set_busy(dnp, thd);
4801 nfsmout_if(error);
4802
4803 if (create && !namedattrs)
4804 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4805
4806 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
4807 if (!error) {
4808 if (create && !namedattrs)
4809 nfs_dulookup_start(&dul, dnp, ctx);
4810 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4811 savedxid = xid;
4812 }
4813
4814 if (create && !namedattrs)
4815 nfs_dulookup_finish(&dul, dnp, ctx);
4816
4817 if ((lockerror = nfs_node_lock(dnp)))
4818 error = lockerror;
4819 nfsm_chain_skip_tag(error, &nmrep);
4820 nfsm_chain_get_32(error, &nmrep, numops);
4821 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4822 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
4823 nfsmout_if(error);
4824 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
4825 nfs_owner_seqid_increment(noop, NULL, error);
4826 nfsm_chain_get_stateid(error, &nmrep, sid);
4827 nfsm_chain_check_change_info(error, &nmrep, dnp);
4828 nfsm_chain_get_32(error, &nmrep, rflags);
4829 bmlen = NFS_ATTR_BITMAP_LEN;
4830 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
4831 nfsm_chain_get_32(error, &nmrep, delegation);
4832 if (!error)
4833 switch (delegation) {
4834 case NFS_OPEN_DELEGATE_NONE:
4835 break;
4836 case NFS_OPEN_DELEGATE_READ:
4837 case NFS_OPEN_DELEGATE_WRITE:
4838 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
4839 nfsm_chain_get_32(error, &nmrep, recall);
4840 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (3 words) XXX
4841 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
4842 /* if we have any trouble accepting the ACE, just invalidate it */
4843 ace_type = ace_flags = ace_mask = len = 0;
4844 nfsm_chain_get_32(error, &nmrep, ace_type);
4845 nfsm_chain_get_32(error, &nmrep, ace_flags);
4846 nfsm_chain_get_32(error, &nmrep, ace_mask);
4847 nfsm_chain_get_32(error, &nmrep, len);
4848 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
4849 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
4850 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
4851 if (!error && (len >= slen)) {
4852 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
4853 if (s)
4854 slen = len+1;
4855 else
4856 ace.ace_flags = 0;
4857 }
4858 if (s)
4859 nfsm_chain_get_opaque(error, &nmrep, len, s);
4860 else
4861 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
4862 if (!error && s) {
4863 s[len] = '\0';
4864 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
4865 ace.ace_flags = 0;
4866 }
4867 if (error || !s)
4868 ace.ace_flags = 0;
4869 if (s && (s != sbuf))
4870 FREE(s, M_TEMP);
4871 break;
4872 default:
4873 error = EBADRPC;
4874 break;
4875 }
4876 /* At this point if we have no error, the object was created/opened. */
4877 open_error = error;
4878 nfsmout_if(error);
4879 if (create && vap && !exclusive)
4880 nfs_vattr_set_supported(bitmap, vap);
4881 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4882 nfsmout_if(error);
4883 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
4884 nfsmout_if(error);
4885 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
4886 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
4887 error = EBADRPC;
4888 goto nfsmout;
4889 }
4890 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4891 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4892 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
4893 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
4894 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
4895 }
4896 /* directory attributes: if we don't get them, make sure to invalidate */
4897 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
4898 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4899 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
4900 if (error)
4901 NATTRINVALIDATE(dnp);
4902 nfsmout_if(error);
4903
4904 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
4905 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
4906
4907 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
4908 nfs_node_unlock(dnp);
4909 lockerror = ENOENT;
4910 NVATTR_CLEANUP(&nvattr);
4911 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
4912 nfsmout_if(error);
4913 savedxid = xid;
4914 if ((lockerror = nfs_node_lock(dnp)))
4915 error = lockerror;
4916 }
4917
4918 nfsmout:
4919 nfsm_chain_cleanup(&nmreq);
4920 nfsm_chain_cleanup(&nmrep);
4921
4922 if (!lockerror && create) {
4923 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
4924 dnp->n_flag &= ~NNEGNCENTRIES;
4925 cache_purge_negatives(dvp);
4926 }
4927 dnp->n_flag |= NMODIFIED;
4928 nfs_node_unlock(dnp);
4929 lockerror = ENOENT;
4930 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4931 }
4932 if (!lockerror)
4933 nfs_node_unlock(dnp);
4934 if (!error && !np && fh.fh_len) {
4935 /* create the vnode with the filehandle and attributes */
4936 xid = savedxid;
4937 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
4938 if (!error)
4939 newvp = NFSTOV(newnp);
4940 }
4941 NVATTR_CLEANUP(&nvattr);
4942 if (!busyerror)
4943 nfs_node_clear_busy(dnp);
4944 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
4945 if (!np)
4946 np = newnp;
4947 if (!error && np && !recall) {
4948 /* stuff the delegation state in the node */
4949 lck_mtx_lock(&np->n_openlock);
4950 np->n_openflags &= ~N_DELEG_MASK;
4951 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4952 np->n_dstateid = dstateid;
4953 np->n_dace = ace;
4954 if (np->n_dlink.tqe_next == NFSNOLIST) {
4955 lck_mtx_lock(&nmp->nm_lock);
4956 if (np->n_dlink.tqe_next == NFSNOLIST)
4957 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4958 lck_mtx_unlock(&nmp->nm_lock);
4959 }
4960 lck_mtx_unlock(&np->n_openlock);
4961 } else {
4962 /* give the delegation back */
4963 if (np) {
4964 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4965 /* update delegation state and return it */
4966 lck_mtx_lock(&np->n_openlock);
4967 np->n_openflags &= ~N_DELEG_MASK;
4968 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4969 np->n_dstateid = dstateid;
4970 np->n_dace = ace;
4971 if (np->n_dlink.tqe_next == NFSNOLIST) {
4972 lck_mtx_lock(&nmp->nm_lock);
4973 if (np->n_dlink.tqe_next == NFSNOLIST)
4974 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4975 lck_mtx_unlock(&nmp->nm_lock);
4976 }
4977 lck_mtx_unlock(&np->n_openlock);
4978 /* don't need to send a separate delegreturn for fh */
4979 fh.fh_len = 0;
4980 }
4981 /* return np's current delegation */
4982 nfs4_delegation_return(np, 0, thd, cred);
4983 }
4984 if (fh.fh_len) /* return fh's delegation if it wasn't for np */
4985 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
4986 }
4987 }
4988 if (error) {
4989 if (exclusive && (error == NFSERR_NOTSUPP)) {
4990 exclusive = 0;
4991 goto again;
4992 }
4993 if (newvp) {
4994 nfs_node_unlock(newnp);
4995 vnode_put(newvp);
4996 }
4997 } else if (create) {
4998 nfs_node_unlock(newnp);
4999 if (exclusive) {
5000 error = nfs4_setattr_rpc(newnp, vap, ctx);
5001 if (error && (gotuid || gotgid)) {
5002 /* it's possible the server didn't like our attempt to set IDs, */
5003 /* so try the SETATTR again without them */
5004 VATTR_CLEAR_ACTIVE(vap, va_uid);
5005 VATTR_CLEAR_ACTIVE(vap, va_gid);
5006 error = nfs4_setattr_rpc(newnp, vap, ctx);
5007 }
5008 }
5009 if (error)
5010 vnode_put(newvp);
5011 else
5012 *vpp = newvp;
5013 }
5014 nfs_open_owner_clear_busy(noop);
5015 return (error);
5016 }
5017
5018
5019 /*
5020 * Send an OPEN RPC to claim a delegated open for a file
5021 */
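/*
 * Protocol sketch (per RFC 7530, mirroring the compound built below): when
 * a delegation is being recalled, the client must turn the opens it has
 * been handling locally into real opens on the server.  That's done with an
 * OPEN of claim type CLAIM_DELEGATE_CUR, which names the file by parent
 * directory filehandle + component name and presents the delegation stateid:
 *
 *	PUTFH(parent dir FH)
 *	OPEN(seqid, access, deny, owner, NOCREATE,
 *	     CLAIM_DELEGATE_CUR { delegation stateid, name })
 *	GETATTR(incl. FILEHANDLE)
 */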
5022 int
5023 nfs4_claim_delegated_open_rpc(
5024 struct nfs_open_file *nofp,
5025 int share_access,
5026 int share_deny,
5027 int flags)
5028 {
5029 struct nfsmount *nmp;
5030 struct nfs_open_owner *noop = nofp->nof_owner;
5031 struct nfs_vattr nvattr;
5032 int error = 0, lockerror = ENOENT, status;
5033 int nfsvers, numops;
5034 u_int64_t xid;
5035 nfsnode_t np = nofp->nof_np;
5036 struct nfsm_chain nmreq, nmrep;
5037 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5038 uint32_t rflags = 0, delegation, recall = 0;
5039 fhandle_t fh;
5040 struct nfs_stateid dstateid;
5041 char sbuf[64], *s = sbuf;
5042 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5043 struct kauth_ace ace;
5044 vnode_t dvp = NULL;
5045 const char *vname = NULL;
5046 const char *name = NULL;
5047 size_t namelen;
5048 char smallname[128];
5049 char *filename = NULL;
5050 struct nfsreq_secinfo_args si;
5051
5052 nmp = NFSTONMP(np);
5053 if (nfs_mount_gone(nmp))
5054 return (ENXIO);
5055 nfsvers = nmp->nm_vers;
5056
5057 nfs_node_lock_force(np);
5058 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5059 /*
5060 * The node's been sillyrenamed, so we need to use
5061 * the sillyrename directory/name to do the open.
5062 */
5063 struct nfs_sillyrename *nsp = np->n_sillyrename;
5064 dvp = NFSTOV(nsp->nsr_dnp);
5065 if ((error = vnode_get(dvp))) {
5066 nfs_node_unlock(np);
5067 goto out;
5068 }
5069 name = nsp->nsr_name;
5070 } else {
5071 /*
5072 * [sigh] We can't trust VFS to get the parent right for named
5073 * attribute nodes. (It likes to reparent the nodes after we've
5074 * created them.) Luckily we can probably get the right parent
5075 * from the n_parent we have stashed away.
5076 */
5077 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5078 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
5079 dvp = NULL;
5080 if (!dvp)
5081 dvp = vnode_getparent(NFSTOV(np));
5082 vname = vnode_getname(NFSTOV(np));
5083 if (!dvp || !vname) {
5084 if (!error)
5085 error = EIO;
5086 nfs_node_unlock(np);
5087 goto out;
5088 }
5089 name = vname;
5090 }
5091 filename = &smallname[0];
5092 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5093 if (namelen >= sizeof(smallname)) {
5094 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
5095 if (!filename) {
5096 error = ENOMEM;
5097 nfs_node_unlock(np);
5098 goto out;
5099 }
5100 snprintf(filename, namelen+1, "%s", name);
5101 }
5102 nfs_node_unlock(np);
5103
5104 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5105 goto out;
5106 NVATTR_INIT(&nvattr);
5107 delegation = NFS_OPEN_DELEGATE_NONE;
5108 dstateid = np->n_dstateid;
5109 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5110
5111 nfsm_chain_null(&nmreq);
5112 nfsm_chain_null(&nmrep);
5113
5114 // PUTFH, OPEN, GETATTR(FH)
5115 numops = 3;
5116 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5117 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5118 numops--;
5119 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5120 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5121 numops--;
5122 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5123 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5124 nfsm_chain_add_32(error, &nmreq, share_access);
5125 nfsm_chain_add_32(error, &nmreq, share_deny);
5126 // open owner: clientid + uid
5127 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5128 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5129 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5130 // openflag4
5131 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5132 // open_claim4
5133 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5134 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5135 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5136 numops--;
5137 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5138 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5139 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5140 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5141 nfsm_chain_build_done(error, &nmreq);
5142 nfsm_assert(error, (numops == 0), EPROTO);
5143 nfsmout_if(error);
5144
5145 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5146 noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5147
5148 if ((lockerror = nfs_node_lock(np)))
5149 error = lockerror;
5150 nfsm_chain_skip_tag(error, &nmrep);
5151 nfsm_chain_get_32(error, &nmrep, numops);
5152 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5153 nfsmout_if(error);
5154 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5155 nfs_owner_seqid_increment(noop, NULL, error);
5156 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5157 nfsm_chain_check_change_info(error, &nmrep, np);
5158 nfsm_chain_get_32(error, &nmrep, rflags);
5159 bmlen = NFS_ATTR_BITMAP_LEN;
5160 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5161 nfsm_chain_get_32(error, &nmrep, delegation);
5162 if (!error)
5163 switch (delegation) {
5164 case NFS_OPEN_DELEGATE_NONE:
5165 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5166 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5167 break;
5168 case NFS_OPEN_DELEGATE_READ:
5169 case NFS_OPEN_DELEGATE_WRITE:
5170 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5171 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5172 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5173 (delegation == NFS_OPEN_DELEGATE_READ)))
5174 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5175 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5176 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5177 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5178 nfsm_chain_get_32(error, &nmrep, recall);
5179 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (3 words)
5180 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5181 /* if we have any trouble accepting the ACE, just invalidate it */
5182 ace_type = ace_flags = ace_mask = len = 0;
5183 nfsm_chain_get_32(error, &nmrep, ace_type);
5184 nfsm_chain_get_32(error, &nmrep, ace_flags);
5185 nfsm_chain_get_32(error, &nmrep, ace_mask);
5186 nfsm_chain_get_32(error, &nmrep, len);
5187 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5188 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5189 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5190 if (!error && (len >= slen)) {
5191 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5192 if (s)
5193 slen = len+1;
5194 else
5195 ace.ace_flags = 0;
5196 }
5197 if (s)
5198 nfsm_chain_get_opaque(error, &nmrep, len, s);
5199 else
5200 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5201 if (!error && s) {
5202 s[len] = '\0';
5203 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5204 ace.ace_flags = 0;
5205 }
5206 if (error || !s)
5207 ace.ace_flags = 0;
5208 if (s && (s != sbuf))
5209 FREE(s, M_TEMP);
5210 if (!error) {
5211 /* stuff the latest delegation state in the node */
5212 lck_mtx_lock(&np->n_openlock);
5213 np->n_openflags &= ~N_DELEG_MASK;
5214 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5215 np->n_dstateid = dstateid;
5216 np->n_dace = ace;
5217 if (np->n_dlink.tqe_next == NFSNOLIST) {
5218 lck_mtx_lock(&nmp->nm_lock);
5219 if (np->n_dlink.tqe_next == NFSNOLIST)
5220 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5221 lck_mtx_unlock(&nmp->nm_lock);
5222 }
5223 lck_mtx_unlock(&np->n_openlock);
5224 }
5225 break;
5226 default:
5227 error = EBADRPC;
5228 break;
5229 }
5230 nfsmout_if(error);
5231 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5232 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5233 nfsmout_if(error);
5234 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5235 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5236 error = EBADRPC;
5237 goto nfsmout;
5238 }
5239 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5240 // XXX what if fh doesn't match the vnode we think we're re-opening?
5241 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5242 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5243 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5244 }
5245 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5246 nfsmout_if(error);
5247 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5248 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5249 nfsmout:
5250 NVATTR_CLEANUP(&nvattr);
5251 nfsm_chain_cleanup(&nmreq);
5252 nfsm_chain_cleanup(&nmrep);
5253 if (!lockerror)
5254 nfs_node_unlock(np);
5255 nfs_open_owner_clear_busy(noop);
5256 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5257 if (recall) {
5258 /*
5259 * We're making a delegated claim.
5260 * Don't return the delegation here in case we have more to claim.
5261 * Just make sure it's queued up to be returned.
5262 */
5263 nfs4_delegation_return_enqueue(np);
5264 }
5265 }
5266 out:
5267 // if (!error)
5268 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5269 if (filename && (filename != &smallname[0]))
5270 FREE(filename, M_TEMP);
5271 if (vname)
5272 vnode_putname(vname);
5273 if (dvp != NULLVP)
5274 vnode_put(dvp);
5275 return (error);
5276 }
5277
5278 /*
5279 * Send an OPEN RPC to reclaim an open file.
5280 */
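/*
 * Protocol sketch: CLAIM_PREVIOUS is used during the server's grace period
 * after a server restart.  The open is done by filehandle rather than by
 * dir/name, and the delegate_type argument tells the server which delegation
 * (if any) we held before the restart so it can be re-established.  The
 * request is sent R_RECOVER|R_NOINTR since it runs as part of state recovery.
 */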
5281 int
5282 nfs4_open_reclaim_rpc(
5283 struct nfs_open_file *nofp,
5284 int share_access,
5285 int share_deny)
5286 {
5287 struct nfsmount *nmp;
5288 struct nfs_open_owner *noop = nofp->nof_owner;
5289 struct nfs_vattr nvattr;
5290 int error = 0, lockerror = ENOENT, status;
5291 int nfsvers, numops;
5292 u_int64_t xid;
5293 nfsnode_t np = nofp->nof_np;
5294 struct nfsm_chain nmreq, nmrep;
5295 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5296 uint32_t rflags = 0, delegation, recall = 0;
5297 fhandle_t fh;
5298 struct nfs_stateid dstateid;
5299 char sbuf[64], *s = sbuf;
5300 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5301 struct kauth_ace ace;
5302 struct nfsreq_secinfo_args si;
5303
5304 nmp = NFSTONMP(np);
5305 if (nfs_mount_gone(nmp))
5306 return (ENXIO);
5307 nfsvers = nmp->nm_vers;
5308
5309 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5310 return (error);
5311
5312 NVATTR_INIT(&nvattr);
5313 delegation = NFS_OPEN_DELEGATE_NONE;
5314 dstateid = np->n_dstateid;
5315 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5316
5317 nfsm_chain_null(&nmreq);
5318 nfsm_chain_null(&nmrep);
5319
5320 // PUTFH, OPEN, GETATTR(FH)
5321 numops = 3;
5322 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5323 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5324 numops--;
5325 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5326 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5327 numops--;
5328 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5329 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5330 nfsm_chain_add_32(error, &nmreq, share_access);
5331 nfsm_chain_add_32(error, &nmreq, share_deny);
5332 // open owner: clientid + uid
5333 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5334 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5335 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5336 // openflag4
5337 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5338 // open_claim4
5339 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5340 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5341 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5342 NFS_OPEN_DELEGATE_NONE;
5343 nfsm_chain_add_32(error, &nmreq, delegation);
5344 delegation = NFS_OPEN_DELEGATE_NONE;
5345 numops--;
5346 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5347 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5348 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5349 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5350 nfsm_chain_build_done(error, &nmreq);
5351 nfsm_assert(error, (numops == 0), EPROTO);
5352 nfsmout_if(error);
5353
5354 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5355 noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status);
5356
5357 if ((lockerror = nfs_node_lock(np)))
5358 error = lockerror;
5359 nfsm_chain_skip_tag(error, &nmrep);
5360 nfsm_chain_get_32(error, &nmrep, numops);
5361 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5362 nfsmout_if(error);
5363 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5364 nfs_owner_seqid_increment(noop, NULL, error);
5365 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5366 nfsm_chain_check_change_info(error, &nmrep, np);
5367 nfsm_chain_get_32(error, &nmrep, rflags);
5368 bmlen = NFS_ATTR_BITMAP_LEN;
5369 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5370 nfsm_chain_get_32(error, &nmrep, delegation);
5371 if (!error)
5372 switch (delegation) {
5373 case NFS_OPEN_DELEGATE_NONE:
5374 if (np->n_openflags & N_DELEG_MASK) {
5375 /*
5376 * Hey! We were supposed to get our delegation back even
5377 * if it was getting immediately recalled. Bad server!
5378 *
5379 * Just try to return the existing delegation.
5380 */
5381 // NP(np, "nfs: open reclaim didn't return delegation?");
5382 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5383 recall = 1;
5384 }
5385 break;
5386 case NFS_OPEN_DELEGATE_READ:
5387 case NFS_OPEN_DELEGATE_WRITE:
5388 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5389 nfsm_chain_get_32(error, &nmrep, recall);
5390 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit (3 words)
5391 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5392 /* if we have any trouble accepting the ACE, just invalidate it */
5393 ace_type = ace_flags = ace_mask = len = 0;
5394 nfsm_chain_get_32(error, &nmrep, ace_type);
5395 nfsm_chain_get_32(error, &nmrep, ace_flags);
5396 nfsm_chain_get_32(error, &nmrep, ace_mask);
5397 nfsm_chain_get_32(error, &nmrep, len);
5398 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5399 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5400 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5401 if (!error && (len >= slen)) {
5402 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5403 if (s)
5404 slen = len+1;
5405 else
5406 ace.ace_flags = 0;
5407 }
5408 if (s)
5409 nfsm_chain_get_opaque(error, &nmrep, len, s);
5410 else
5411 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5412 if (!error && s) {
5413 s[len] = '\0';
5414 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5415 ace.ace_flags = 0;
5416 }
5417 if (error || !s)
5418 ace.ace_flags = 0;
5419 if (s && (s != sbuf))
5420 FREE(s, M_TEMP);
5421 if (!error) {
5422 /* stuff the delegation state in the node */
5423 lck_mtx_lock(&np->n_openlock);
5424 np->n_openflags &= ~N_DELEG_MASK;
5425 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5426 np->n_dstateid = dstateid;
5427 np->n_dace = ace;
5428 if (np->n_dlink.tqe_next == NFSNOLIST) {
5429 lck_mtx_lock(&nmp->nm_lock);
5430 if (np->n_dlink.tqe_next == NFSNOLIST)
5431 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5432 lck_mtx_unlock(&nmp->nm_lock);
5433 }
5434 lck_mtx_unlock(&np->n_openlock);
5435 }
5436 break;
5437 default:
5438 error = EBADRPC;
5439 break;
5440 }
5441 nfsmout_if(error);
5442 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5443 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5444 nfsmout_if(error);
5445 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5446 NP(np, "nfs: open reclaim didn't return filehandle?");
5447 error = EBADRPC;
5448 goto nfsmout;
5449 }
5450 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5451 // XXX what if fh doesn't match the vnode we think we're re-opening?
5452 // That should be pretty hard in this case, given that we are doing
5453 // the open reclaim using the file handle (and not a dir/name pair).
5454 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5455 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5456 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5457 }
5458 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5459 nfsmout_if(error);
5460 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5461 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5462 nfsmout:
5463 // if (!error)
5464 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5465 NVATTR_CLEANUP(&nvattr);
5466 nfsm_chain_cleanup(&nmreq);
5467 nfsm_chain_cleanup(&nmrep);
5468 if (!lockerror)
5469 nfs_node_unlock(np);
5470 nfs_open_owner_clear_busy(noop);
5471 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5472 if (recall)
5473 nfs4_delegation_return_enqueue(np);
5474 }
5475 return (error);
5476 }
5477
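/*
 * Send an OPEN_DOWNGRADE RPC to shrink the share modes on an open stateid.
 * (Protocol note: the access/deny modes sent -- nof_access/nof_deny -- must
 * be a subset of what this open owner has already OPENed; the server then
 * narrows the stateid to exactly those modes.)
 */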
5478 int
5479 nfs4_open_downgrade_rpc(
5480 nfsnode_t np,
5481 struct nfs_open_file *nofp,
5482 vfs_context_t ctx)
5483 {
5484 struct nfs_open_owner *noop = nofp->nof_owner;
5485 struct nfsmount *nmp;
5486 int error, lockerror = ENOENT, status, nfsvers, numops;
5487 struct nfsm_chain nmreq, nmrep;
5488 u_int64_t xid;
5489 struct nfsreq_secinfo_args si;
5490
5491 nmp = NFSTONMP(np);
5492 if (nfs_mount_gone(nmp))
5493 return (ENXIO);
5494 nfsvers = nmp->nm_vers;
5495
5496 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5497 return (error);
5498
5499 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5500 nfsm_chain_null(&nmreq);
5501 nfsm_chain_null(&nmrep);
5502
5503 // PUTFH, OPEN_DOWNGRADE, GETATTR
5504 numops = 3;
5505 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5506 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5507 numops--;
5508 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5509 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5510 numops--;
5511 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5512 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5513 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5514 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5515 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5516 numops--;
5517 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5518 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5519 nfsm_chain_build_done(error, &nmreq);
5520 nfsm_assert(error, (numops == 0), EPROTO);
5521 nfsmout_if(error);
5522 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5523 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5524 &si, R_NOINTR, &nmrep, &xid, &status);
5525
5526 if ((lockerror = nfs_node_lock(np)))
5527 error = lockerror;
5528 nfsm_chain_skip_tag(error, &nmrep);
5529 nfsm_chain_get_32(error, &nmrep, numops);
5530 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5531 nfsmout_if(error);
5532 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
5533 nfs_owner_seqid_increment(noop, NULL, error);
5534 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5535 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5536 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5537 nfsmout:
5538 if (!lockerror)
5539 nfs_node_unlock(np);
5540 nfs_open_owner_clear_busy(noop);
5541 nfsm_chain_cleanup(&nmreq);
5542 nfsm_chain_cleanup(&nmrep);
5543 return (error);
5544 }
5545
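/*
 * Send a CLOSE RPC to release this open file's open state on the server.
 */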
5546 int
5547 nfs4_close_rpc(
5548 nfsnode_t np,
5549 struct nfs_open_file *nofp,
5550 thread_t thd,
5551 kauth_cred_t cred,
5552 int flags)
5553 {
5554 struct nfs_open_owner *noop = nofp->nof_owner;
5555 struct nfsmount *nmp;
5556 int error, lockerror = ENOENT, status, nfsvers, numops;
5557 struct nfsm_chain nmreq, nmrep;
5558 u_int64_t xid;
5559 struct nfsreq_secinfo_args si;
5560
5561 nmp = NFSTONMP(np);
5562 if (nfs_mount_gone(nmp))
5563 return (ENXIO);
5564 nfsvers = nmp->nm_vers;
5565
5566 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5567 return (error);
5568
5569 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5570 nfsm_chain_null(&nmreq);
5571 nfsm_chain_null(&nmrep);
5572
5573 // PUTFH, CLOSE, GETATTR
5574 numops = 3;
5575 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5576 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
5577 numops--;
5578 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5579 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5580 numops--;
5581 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
5582 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5583 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5584 numops--;
5585 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5586 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5587 nfsm_chain_build_done(error, &nmreq);
5588 nfsm_assert(error, (numops == 0), EPROTO);
5589 nfsmout_if(error);
5590 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5591
5592 if ((lockerror = nfs_node_lock(np)))
5593 error = lockerror;
5594 nfsm_chain_skip_tag(error, &nmrep);
5595 nfsm_chain_get_32(error, &nmrep, numops);
5596 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5597 nfsmout_if(error);
5598 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
5599 nfs_owner_seqid_increment(noop, NULL, error);
5600 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5601 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5602 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5603 nfsmout:
5604 if (!lockerror)
5605 nfs_node_unlock(np);
5606 nfs_open_owner_clear_busy(noop);
5607 nfsm_chain_cleanup(&nmreq);
5608 nfsm_chain_cleanup(&nmrep);
5609 return (error);
5610 }
5611
5612
5613 /*
5614 * Claim the delegated open combinations this open file holds.
5615 */
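/*
 * The delegated opens are tracked per (access, deny) combination in the
 * nof_d_* counters, and each combination is claimed with its own OPEN.
 * A sketch of the matrix walked below, most restrictive first:
 *
 *	counter		access	deny
 *	nof_d_rw_drw	BOTH	BOTH
 *	nof_d_w_drw	WRITE	BOTH
 *	nof_d_r_drw	READ	BOTH
 *	nof_d_rw_dw	BOTH	WRITE
 *	nof_d_w_dw	WRITE	WRITE
 *	nof_d_r_dw	READ	WRITE
 *	nof_d_rw	BOTH	NONE
 *	nof_d_w		WRITE	NONE
 *	nof_d_r		READ	NONE
 *
 * Only the deny-none opens may fall back to a simple reopen on failure
 * (a reopen can't re-establish deny modes), and only if no locks are held.
 */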
5616 int
5617 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
5618 {
5619 struct nfs_open_owner *noop = nofp->nof_owner;
5620 struct nfs_lock_owner *nlop;
5621 struct nfs_file_lock *nflp, *nextnflp;
5622 struct nfsmount *nmp;
5623 int error = 0, reopen = 0;
5624
5625 if (nofp->nof_d_rw_drw) {
5626 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
5627 if (!error) {
5628 lck_mtx_lock(&nofp->nof_lock);
5629 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
5630 nofp->nof_d_rw_drw = 0;
5631 lck_mtx_unlock(&nofp->nof_lock);
5632 }
5633 }
5634 if (!error && nofp->nof_d_w_drw) {
5635 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
5636 if (!error) {
5637 lck_mtx_lock(&nofp->nof_lock);
5638 nofp->nof_w_drw += nofp->nof_d_w_drw;
5639 nofp->nof_d_w_drw = 0;
5640 lck_mtx_unlock(&nofp->nof_lock);
5641 }
5642 }
5643 if (!error && nofp->nof_d_r_drw) {
5644 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
5645 if (!error) {
5646 lck_mtx_lock(&nofp->nof_lock);
5647 nofp->nof_r_drw += nofp->nof_d_r_drw;
5648 nofp->nof_d_r_drw = 0;
5649 lck_mtx_unlock(&nofp->nof_lock);
5650 }
5651 }
5652 if (!error && nofp->nof_d_rw_dw) {
5653 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
5654 if (!error) {
5655 lck_mtx_lock(&nofp->nof_lock);
5656 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
5657 nofp->nof_d_rw_dw = 0;
5658 lck_mtx_unlock(&nofp->nof_lock);
5659 }
5660 }
5661 if (!error && nofp->nof_d_w_dw) {
5662 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
5663 if (!error) {
5664 lck_mtx_lock(&nofp->nof_lock);
5665 nofp->nof_w_dw += nofp->nof_d_w_dw;
5666 nofp->nof_d_w_dw = 0;
5667 lck_mtx_unlock(&nofp->nof_lock);
5668 }
5669 }
5670 if (!error && nofp->nof_d_r_dw) {
5671 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
5672 if (!error) {
5673 lck_mtx_lock(&nofp->nof_lock);
5674 nofp->nof_r_dw += nofp->nof_d_r_dw;
5675 nofp->nof_d_r_dw = 0;
5676 lck_mtx_unlock(&nofp->nof_lock);
5677 }
5678 }
5679 /* non-deny-mode opens may be reopened if no locks are held */
5680 if (!error && nofp->nof_d_rw) {
5681 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
5682 /* for some errors, we should just try reopening the file */
5683 if (nfs_mount_state_error_delegation_lost(error))
5684 reopen = error;
5685 if (!error || reopen) {
5686 lck_mtx_lock(&nofp->nof_lock);
5687 nofp->nof_rw += nofp->nof_d_rw;
5688 nofp->nof_d_rw = 0;
5689 lck_mtx_unlock(&nofp->nof_lock);
5690 }
5691 }
5692 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5693 if ((!error || reopen) && nofp->nof_d_w) {
5694 if (!error) {
5695 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
5696 /* for some errors, we should just try reopening the file */
5697 if (nfs_mount_state_error_delegation_lost(error))
5698 reopen = error;
5699 }
5700 if (!error || reopen) {
5701 lck_mtx_lock(&nofp->nof_lock);
5702 nofp->nof_w += nofp->nof_d_w;
5703 nofp->nof_d_w = 0;
5704 lck_mtx_unlock(&nofp->nof_lock);
5705 }
5706 }
5707 if ((!error || reopen) && nofp->nof_d_r) {
5708 if (!error) {
5709 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
5710 /* for some errors, we should just try reopening the file */
5711 if (nfs_mount_state_error_delegation_lost(error))
5712 reopen = error;
5713 }
5714 if (!error || reopen) {
5715 lck_mtx_lock(&nofp->nof_lock);
5716 nofp->nof_r += nofp->nof_d_r;
5717 nofp->nof_d_r = 0;
5718 lck_mtx_unlock(&nofp->nof_lock);
5719 }
5720 }
5721
5722 if (reopen) {
5723 /*
5724 * Any problems with the delegation probably indicates that we
5725 * should review/return all of our current delegation state.
5726 */
5727 if ((nmp = NFSTONMP(nofp->nof_np))) {
5728 nfs4_delegation_return_enqueue(nofp->nof_np);
5729 lck_mtx_lock(&nmp->nm_lock);
5730 nfs_need_recover(nmp, NFSERR_EXPIRED);
5731 lck_mtx_unlock(&nmp->nm_lock);
5732 }
5733 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
5734 /* just reopen the file on next access */
5735 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5736 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5737 lck_mtx_lock(&nofp->nof_lock);
5738 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
5739 lck_mtx_unlock(&nofp->nof_lock);
5740 return (0);
5741 }
5742 if (reopen)
5743 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5744 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5745 }
5746
5747 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
5748 /* claim delegated locks */
5749 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
5750 if (nlop->nlo_open_owner != noop)
5751 continue;
5752 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
5753 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5754 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5755 continue;
5756 /* skip non-delegated locks */
5757 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5758 continue;
5759 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
5760 if (error) {
5761 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5762 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5763 break;
5764 }
5765 // else {
5766 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5767 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5768 // }
5769 }
5770 if (error)
5771 break;
5772 }
5773 }
5774
5775 if (!error) /* all state claimed successfully! */
5776 return (0);
5777
5778 /* restart if it looks like a problem more than just losing the delegation */
5779 if (!nfs_mount_state_error_delegation_lost(error) &&
5780 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
5781 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5782 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np))))
5783 nfs_need_reconnect(nmp);
5784 return (error);
5785 }
5786
5787 /* delegated state lost (once held but now not claimable) */
5788 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5789
5790 /*
5791 * Any problems with the delegation probably indicates that we
5792 * should review/return all of our current delegation state.
5793 */
5794 if ((nmp = NFSTONMP(nofp->nof_np))) {
5795 nfs4_delegation_return_enqueue(nofp->nof_np);
5796 lck_mtx_lock(&nmp->nm_lock);
5797 nfs_need_recover(nmp, NFSERR_EXPIRED);
5798 lck_mtx_unlock(&nmp->nm_lock);
5799 }
5800
5801 /* revoke all open file state */
5802 nfs_revoke_open_state_for_node(nofp->nof_np);
5803
5804 return (error);
5805 }
5806
5807 /*
5808 * Release all open state for the given node.
5809 */
5810 void
5811 nfs_release_open_state_for_node(nfsnode_t np, int force)
5812 {
5813 struct nfsmount *nmp = NFSTONMP(np);
5814 struct nfs_open_file *nofp;
5815 struct nfs_file_lock *nflp, *nextnflp;
5816
5817 /* drop held locks */
5818 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
5819 /* skip dead & blocked lock requests */
5820 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5821 continue;
5822 /* send an unlock if not a delegated lock */
5823 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5824 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
5825 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
5826 /* kill/remove the lock */
5827 lck_mtx_lock(&np->n_openlock);
5828 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
5829 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
5830 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
5831 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
5832 if (nflp->nfl_blockcnt) {
5833 /* wake up anyone blocked on this lock */
5834 wakeup(nflp);
5835 } else {
5836 /* remove nflp from lock list and destroy */
5837 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
5838 nfs_file_lock_destroy(nflp);
5839 }
5840 lck_mtx_unlock(&np->n_openlock);
5841 }
5842
5843 lck_mtx_lock(&np->n_openlock);
5844
5845 /* drop all opens */
5846 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5847 if (nofp->nof_flags & NFS_OPEN_FILE_LOST)
5848 continue;
5849 /* mark open state as lost */
5850 lck_mtx_lock(&nofp->nof_lock);
5851 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5852 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
5853
5854 lck_mtx_unlock(&nofp->nof_lock);
5855 if (!force && nmp && (nmp->nm_vers >= NFS_VER4))
5856 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
5857 }
5858
5859 lck_mtx_unlock(&np->n_openlock);
5860 }
5861
5862 /*
5863 * State for a node has been lost, drop it, and revoke the node.
5864 * Attempt to return any state if possible in case the server
5865 * might somehow think we hold it.
5866 */
5867 void
5868 nfs_revoke_open_state_for_node(nfsnode_t np)
5869 {
5870 struct nfsmount *nmp;
5871
5872 /* mark node as needing to be revoked */
5873 nfs_node_lock_force(np);
5874 if (np->n_flag & NREVOKE) { /* already revoked? */
5876 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
5877 nfs_node_unlock(np);
5878 return;
5879 }
5880 np->n_flag |= NREVOKE;
5881 nfs_node_unlock(np);
5882
5883 nfs_release_open_state_for_node(np, 0);
5884 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
5885
5886 /* mark mount as needing a revoke scan and have the socket thread do it. */
5887 if ((nmp = NFSTONMP(np))) {
5888 lck_mtx_lock(&nmp->nm_lock);
5889 nmp->nm_state |= NFSSTA_REVOKE;
5890 nfs_mount_sock_thread_wake(nmp);
5891 lck_mtx_unlock(&nmp->nm_lock);
5892 }
5893 }
5894
5895 /*
5896 * Claim the delegated open combinations that each of this node's open files holds.
5897 */
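/*
 * Note the restart idiom below: n_openlock can't be held across the claim
 * RPCs, so the walk drops the lock, claims, retakes the lock, and rescans
 * from the head of the list in case it changed while unlocked.
 */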
5898 int
5899 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
5900 {
5901 struct nfs_open_file *nofp;
5902 int error = 0;
5903
5904 lck_mtx_lock(&np->n_openlock);
5905
5906 /* walk the open file list looking for opens with delegated state to claim */
5907 restart:
5908 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5909 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
5910 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
5911 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r)
5912 continue;
5913 lck_mtx_unlock(&np->n_openlock);
5914 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
5915 lck_mtx_lock(&np->n_openlock);
5916 if (error)
5917 break;
5918 goto restart;
5919 }
5920
5921 lck_mtx_unlock(&np->n_openlock);
5922
5923 return (error);
5924 }
5925
5926 /*
5927 * Mark a node as needing to have its delegation returned.
5928 * Queue it up on the delegation return queue.
5929 * Make sure the thread is running.
5930 */
5931 void
5932 nfs4_delegation_return_enqueue(nfsnode_t np)
5933 {
5934 struct nfsmount *nmp;
5935
5936 nmp = NFSTONMP(np);
5937 if (nfs_mount_gone(nmp))
5938 return;
5939
5940 lck_mtx_lock(&np->n_openlock);
5941 np->n_openflags |= N_DELEG_RETURN;
5942 lck_mtx_unlock(&np->n_openlock);
5943
5944 lck_mtx_lock(&nmp->nm_lock);
5945 if (np->n_dreturn.tqe_next == NFSNOLIST)
5946 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
5947 nfs_mount_sock_thread_wake(nmp);
5948 lck_mtx_unlock(&nmp->nm_lock);
5949 }
5950
5951 /*
5952 * return any delegation we may have for the given node
5953 */
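/*
 * Ordering note: any delegated opens and locks must be claimed on the
 * server *before* the DELEGRETURN is sent, since the delegation stateid
 * can no longer be used to establish that state once it's been returned.
 */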
5954 int
5955 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
5956 {
5957 struct nfsmount *nmp;
5958 fhandle_t fh;
5959 nfs_stateid dstateid;
5960 int error;
5961
5962 nmp = NFSTONMP(np);
5963 if (nfs_mount_gone(nmp))
5964 return (ENXIO);
5965
5966 /* first, make sure the node's marked for delegation return */
5967 lck_mtx_lock(&np->n_openlock);
5968 np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING);
5969 lck_mtx_unlock(&np->n_openlock);
5970
5971 /* make sure nobody else is using the delegation state */
5972 if ((error = nfs_open_state_set_busy(np, NULL)))
5973 goto out;
5974
5975 /* claim any delegated state */
5976 if ((error = nfs4_claim_delegated_state_for_node(np, flags)))
5977 goto out;
5978
5979 /* return the delegation */
5980 lck_mtx_lock(&np->n_openlock);
5981 dstateid = np->n_dstateid;
5982 fh.fh_len = np->n_fhsize;
5983 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
5984 lck_mtx_unlock(&np->n_openlock);
5985 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
5986 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5987 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
5988 lck_mtx_lock(&np->n_openlock);
5989 np->n_openflags &= ~N_DELEG_MASK;
5990 lck_mtx_lock(&nmp->nm_lock);
5991 if (np->n_dlink.tqe_next != NFSNOLIST) {
5992 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
5993 np->n_dlink.tqe_next = NFSNOLIST;
5994 }
5995 lck_mtx_unlock(&nmp->nm_lock);
5996 lck_mtx_unlock(&np->n_openlock);
5997 }
5998
5999 out:
6000 /* make sure it's no longer on the return queue and clear the return flags */
6001 lck_mtx_lock(&nmp->nm_lock);
6002 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6003 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6004 np->n_dreturn.tqe_next = NFSNOLIST;
6005 }
6006 lck_mtx_unlock(&nmp->nm_lock);
6007 lck_mtx_lock(&np->n_openlock);
6008 np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING);
6009 lck_mtx_unlock(&np->n_openlock);
6010
6011 if (error) {
6012 NP(np, "nfs4_delegation_return, error %d", error);
6013 if (error == ETIMEDOUT)
6014 nfs_need_reconnect(nmp);
6015 if (nfs_mount_state_error_should_restart(error)) {
6016 /* make sure recovery happens */
6017 lck_mtx_lock(&nmp->nm_lock);
6018 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6019 lck_mtx_unlock(&nmp->nm_lock);
6020 }
6021 }
6022
6023 nfs_open_state_clear_busy(np);
6024
6025 return (error);
6026 }
6027
6028 /*
6029 * RPC to return a delegation for a file handle
6030 */
6031 int
6032 nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6033 {
6034 int error = 0, status, numops;
6035 uint64_t xid;
6036 struct nfsm_chain nmreq, nmrep;
6037 struct nfsreq_secinfo_args si;
6038
6039 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6040 nfsm_chain_null(&nmreq);
6041 nfsm_chain_null(&nmrep);
6042
6043 // PUTFH, DELEGRETURN
6044 numops = 2;
6045 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6046 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6047 numops--;
6048 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6049 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6050 numops--;
6051 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6052 nfsm_chain_add_stateid(error, &nmreq, sid);
6053 nfsm_chain_build_done(error, &nmreq);
6054 nfsm_assert(error, (numops == 0), EPROTO);
6055 nfsmout_if(error);
6056 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6057 nfsm_chain_skip_tag(error, &nmrep);
6058 nfsm_chain_get_32(error, &nmrep, numops);
6059 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6060 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6061 nfsmout:
6062 nfsm_chain_cleanup(&nmreq);
6063 nfsm_chain_cleanup(&nmrep);
6064 return (error);
6065 }
6066
6067
6068 /*
6069 * NFS read call.
6070 * Just call nfs_bioread() to do the work.
6071 *
6072 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6073 * without first calling VNOP_OPEN, so we make sure the file is open here.
6074 */
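/*
 * A sketch of the flow below:
 *   1. find this cred's open owner and open file for the node
 *   2. if we already have READ access, go straight to nfs_bioread()
 *   3. otherwise do an implicit open for read (v2/v3: just record it,
 *      v4: send an OPEN) and mark it NFS_OPEN_FILE_NEEDCLOSE so the
 *      open can be cleaned up later
 */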
6075 int
6076 nfs_vnop_read(
6077 struct vnop_read_args /* {
6078 struct vnodeop_desc *a_desc;
6079 vnode_t a_vp;
6080 struct uio *a_uio;
6081 int a_ioflag;
6082 vfs_context_t a_context;
6083 } */ *ap)
6084 {
6085 vnode_t vp = ap->a_vp;
6086 vfs_context_t ctx = ap->a_context;
6087 nfsnode_t np;
6088 struct nfsmount *nmp;
6089 struct nfs_open_owner *noop;
6090 struct nfs_open_file *nofp;
6091 int error;
6092
6093 if (vnode_vtype(ap->a_vp) != VREG)
6094 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6095
6096 np = VTONFS(vp);
6097 nmp = NFSTONMP(np);
6098 if (nfs_mount_gone(nmp))
6099 return (ENXIO);
6100 if (np->n_flag & NREVOKE)
6101 return (EIO);
6102
6103 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6104 if (!noop)
6105 return (ENOMEM);
6106 restart:
6107 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6108 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6109 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6110 error = EIO;
6111 }
6112 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6113 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6114 nofp = NULL;
6115 if (!error)
6116 goto restart;
6117 }
6118 if (error) {
6119 nfs_open_owner_rele(noop);
6120 return (error);
6121 }
6122 /*
6123 * Since the read path is a hot path, if we already have
6124 * read access, let's go ahead and do the read without
6125 * busying the mount and open file node for this open owner.
6126 *
6127 * N.B. This is inherently racy w.r.t. an execve using
6128 * an already open file, in that the read at the end of
6129 * this routine will be racing with a potential close.
6130 * The code below ultimately has the same problem. In practice
6131 * this does not seem to be an issue.
6132 */
6133 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6134 nfs_open_owner_rele(noop);
6135 goto do_read;
6136 }
6137 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6138 if (error) {
6139 nfs_open_owner_rele(noop);
6140 return (error);
6141 }
6142 /*
6143 * If we don't have a file already open with the access we need (read) then
6144 * we need to open one. Otherwise we just co-opt an open. We might not already
6145 * have access because we're trying to read the first page of the
6146 * file for execve.
6147 */
6148 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6149 if (error) {
6150 nfs_mount_state_in_use_end(nmp, 0);
6151 nfs_open_owner_rele(noop);
6152 return (error);
6153 }
6154 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6155 /* we don't have the file open, so open it for read access if we're not denied */
6156 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6157 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6158 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6159 }
6160 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6161 nfs_open_file_clear_busy(nofp);
6162 nfs_mount_state_in_use_end(nmp, 0);
6163 nfs_open_owner_rele(noop);
6164 return (EPERM);
6165 }
6166 if (np->n_flag & NREVOKE) {
6167 error = EIO;
6168 nfs_open_file_clear_busy(nofp);
6169 nfs_mount_state_in_use_end(nmp, 0);
6170 nfs_open_owner_rele(noop);
6171 return (error);
6172 }
6173 if (nmp->nm_vers < NFS_VER4) {
6174 /* NFS v2/v3 opens are always allowed - so just add it. */
6175 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6176 } else {
6177 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6178 }
6179 if (!error)
6180 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6181 }
6182 if (nofp)
6183 nfs_open_file_clear_busy(nofp);
6184 if (nfs_mount_state_in_use_end(nmp, error)) {
6185 nofp = NULL;
6186 goto restart;
6187 }
6188 nfs_open_owner_rele(noop);
6189 if (error)
6190 return (error);
6191 do_read:
6192 return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
6193 }
6194
6195 /*
6196 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6197 * Files are created using the NFSv4 OPEN RPC. So we must open the
6198 * file to create it and then close it.
6199 */
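/*
 * Roughly, the create sequence here is:
 *
 *	nfs4_open_rpc(..., NFS_OPEN_CREATE, ACCESS_BOTH, DENY_NONE)
 *		creates the file and leaves it open read/write
 *	nofp->nof_flags |= NFS_OPEN_FILE_CREATE; nofp->nof_creator = ...
 *		remembers the open was create-initiated so a subsequent
 *		VNOP_OPEN by the creating thread can adopt it
 *
 * The matching close happens later through the normal open file
 * accounting rather than in this function.
 */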
6200 int
6201 nfs4_vnop_create(
6202 struct vnop_create_args /* {
6203 struct vnodeop_desc *a_desc;
6204 vnode_t a_dvp;
6205 vnode_t *a_vpp;
6206 struct componentname *a_cnp;
6207 struct vnode_attr *a_vap;
6208 vfs_context_t a_context;
6209 } */ *ap)
6210 {
6211 vfs_context_t ctx = ap->a_context;
6212 struct componentname *cnp = ap->a_cnp;
6213 struct vnode_attr *vap = ap->a_vap;
6214 vnode_t dvp = ap->a_dvp;
6215 vnode_t *vpp = ap->a_vpp;
6216 struct nfsmount *nmp;
6217 nfsnode_t np;
6218 int error = 0, busyerror = 0, accessMode, denyMode;
6219 struct nfs_open_owner *noop = NULL;
6220 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6221
6222 nmp = VTONMP(dvp);
6223 if (nfs_mount_gone(nmp))
6224 return (ENXIO);
6225
6226 if (vap)
6227 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6228
6229 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6230 if (!noop)
6231 return (ENOMEM);
6232
6233 restart:
6234 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6235 if (error) {
6236 nfs_open_owner_rele(noop);
6237 return (error);
6238 }
6239
6240 /* grab a provisional, nodeless open file */
6241 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6242 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6243 printf("nfs_vnop_create: LOST\n");
6244 error = EIO;
6245 }
6246 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6247 /* This shouldn't happen given that this is a new, nodeless nofp */
6248 nfs_mount_state_in_use_end(nmp, 0);
6249 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6250 nfs_open_file_destroy(newnofp);
6251 newnofp = NULL;
6252 if (!error)
6253 goto restart;
6254 }
6255 if (!error)
6256 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6257 if (error) {
6258 if (newnofp)
6259 nfs_open_file_destroy(newnofp);
6260 newnofp = NULL;
6261 goto out;
6262 }
6263
6264 /*
6265 * We're just trying to create the file.
6266 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6267 */
6268 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6269 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6270
6271 /* Do the open/create */
6272 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6273 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6274 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6275 /*
6276 * Hmm... it looks like the request may have been retransmitted: we never
6277 * got the first response, which successfully created/opened the file, and
6278 * then the second attempt was denied the open because the mode the file
6279 * was created with doesn't allow write access.
6280 *
6281 * We'll try to work around this by temporarily updating the mode and
6282 * retrying the open.
6283 */
6284 struct vnode_attr vattr;
6285
6286 /* first make sure it's there */
6287 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6288 if (!error2 && np) {
6289 nfs_node_unlock(np);
6290 *vpp = NFSTOV(np);
6291 if (vnode_vtype(NFSTOV(np)) == VREG) {
6292 VATTR_INIT(&vattr);
6293 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6294 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6295 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6296 VATTR_INIT(&vattr);
6297 VATTR_SET(&vattr, va_mode, vap->va_mode);
6298 nfs4_setattr_rpc(np, &vattr, ctx);
6299 if (!error2)
6300 error = 0;
6301 }
6302 }
6303 if (error) {
6304 vnode_put(*vpp);
6305 *vpp = NULL;
6306 }
6307 }
6308 }
6309 if (!error && !*vpp) {
6310 printf("nfs4_open_rpc returned without a node?\n");
6311 /* Hmmm... with no node, we have no filehandle and can't close it */
6312 error = EIO;
6313 }
6314 if (error) {
6315 /* need to clean up our temporary nofp */
6316 nfs_open_file_clear_busy(newnofp);
6317 nfs_open_file_destroy(newnofp);
6318 newnofp = NULL;
6319 goto out;
6320 }
6321 /* After we have a node, add our open file struct to the node */
6322 np = VTONFS(*vpp);
6323 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6324 nofp = newnofp;
6325 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6326 if (error) {
6327 /* This shouldn't happen, because we passed in a new nofp to use. */
6328 printf("nfs_open_file_find_internal failed! %d\n", error);
6329 goto out;
6330 } else if (nofp != newnofp) {
6331 /*
6332 * Hmm... an open file struct already exists.
6333 * Mark the existing one busy and merge our open into it.
6334 * Then destroy the one we created.
6335 * Note: there's no chance of an open conflict because the
6336 * open has already been granted.
6337 */
6338 busyerror = nfs_open_file_set_busy(nofp, NULL);
6339 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6340 nofp->nof_stateid = newnofp->nof_stateid;
6341 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
6342 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6343 nfs_open_file_clear_busy(newnofp);
6344 nfs_open_file_destroy(newnofp);
6345 }
6346 newnofp = NULL;
6347 /* mark the node as holding a create-initiated open */
6348 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6349 nofp->nof_creator = current_thread();
6350 out:
6351 if (nofp && !busyerror)
6352 nfs_open_file_clear_busy(nofp);
6353 if (nfs_mount_state_in_use_end(nmp, error)) {
6354 nofp = newnofp = NULL;
6355 busyerror = 0;
6356 goto restart;
6357 }
6358 if (noop)
6359 nfs_open_owner_rele(noop);
6360 return (error);
6361 }
6362
6363 /*
6364 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6365 */
6366 int
6367 nfs4_create_rpc(
6368 vfs_context_t ctx,
6369 nfsnode_t dnp,
6370 struct componentname *cnp,
6371 struct vnode_attr *vap,
6372 int type,
6373 char *link,
6374 nfsnode_t *npp)
6375 {
6376 struct nfsmount *nmp;
6377 struct nfs_vattr nvattr;
6378 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6379 int nfsvers, namedattrs, numops;
6380 u_int64_t xid, savedxid = 0;
6381 nfsnode_t np = NULL;
6382 vnode_t newvp = NULL;
6383 struct nfsm_chain nmreq, nmrep;
6384 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6385 const char *tag;
6386 nfs_specdata sd;
6387 fhandle_t fh;
6388 struct nfsreq rq, *req = &rq;
6389 struct nfs_dulookup dul;
6390 struct nfsreq_secinfo_args si;
6391
6392 nmp = NFSTONMP(dnp);
6393 if (nfs_mount_gone(nmp))
6394 return (ENXIO);
6395 nfsvers = nmp->nm_vers;
6396 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6397 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6398 return (EINVAL);
6399
6400 sd.specdata1 = sd.specdata2 = 0;
6401
6402 switch (type) {
6403 case NFLNK:
6404 tag = "symlink";
6405 break;
6406 case NFBLK:
6407 case NFCHR:
6408 tag = "mknod";
6409 if (!VATTR_IS_ACTIVE(vap, va_rdev))
6410 return (EINVAL);
6411 sd.specdata1 = major(vap->va_rdev);
6412 sd.specdata2 = minor(vap->va_rdev);
6413 break;
6414 case NFSOCK:
6415 case NFFIFO:
6416 tag = "mknod";
6417 break;
6418 case NFDIR:
6419 tag = "mkdir";
6420 break;
6421 default:
6422 return (EINVAL);
6423 }
6424
6425 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6426
6427 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6428 if (!namedattrs)
6429 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6430
6431 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6432 NVATTR_INIT(&nvattr);
6433 nfsm_chain_null(&nmreq);
6434 nfsm_chain_null(&nmrep);
6435
6436 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6437 numops = 6;
6438 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6439 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6440 numops--;
6441 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6442 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6443 numops--;
6444 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6445 numops--;
6446 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6447 nfsm_chain_add_32(error, &nmreq, type);
6448 if (type == NFLNK) {
6449 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6450 } else if ((type == NFBLK) || (type == NFCHR)) {
6451 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6452 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6453 }
6454 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6455 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6456 numops--;
6457 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6458 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6459 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6460 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6461 numops--;
6462 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6463 numops--;
6464 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6465 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6466 nfsm_chain_build_done(error, &nmreq);
6467 nfsm_assert(error, (numops == 0), EPROTO);
6468 nfsmout_if(error);
6469
6470 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6471 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6472 if (!error) {
6473 if (!namedattrs)
6474 nfs_dulookup_start(&dul, dnp, ctx);
6475 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6476 }
6477
6478 if ((lockerror = nfs_node_lock(dnp)))
6479 error = lockerror;
6480 nfsm_chain_skip_tag(error, &nmrep);
6481 nfsm_chain_get_32(error, &nmrep, numops);
6482 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6483 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6484 nfsmout_if(error);
6485 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6486 nfsm_chain_check_change_info(error, &nmrep, dnp);
6487 bmlen = NFS_ATTR_BITMAP_LEN;
6488 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6489 /* At this point if we have no error, the object was created. */
6490 /* if we don't get attributes, then we should lookitup. */
6491 create_error = error;
6492 nfsmout_if(error);
6493 nfs_vattr_set_supported(bitmap, vap);
6494 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6495 nfsmout_if(error);
6496 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6497 nfsmout_if(error);
6498 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6499 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6500 error = EBADRPC;
6501 goto nfsmout;
6502 }
6503 /* directory attributes: if we don't get them, make sure to invalidate */
6504 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6505 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6506 savedxid = xid;
6507 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
6508 if (error)
6509 NATTRINVALIDATE(dnp);
6510
6511 nfsmout:
6512 nfsm_chain_cleanup(&nmreq);
6513 nfsm_chain_cleanup(&nmrep);
6514
6515 if (!lockerror) {
6516 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
6517 dnp->n_flag &= ~NNEGNCENTRIES;
6518 cache_purge_negatives(NFSTOV(dnp));
6519 }
6520 dnp->n_flag |= NMODIFIED;
6521 nfs_node_unlock(dnp);
6522 /* nfs_getattr() will check changed and purge caches */
6523 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6524 }
6525
6526 if (!error && fh.fh_len) {
6527 /* create the vnode with the filehandle and attributes */
6528 xid = savedxid;
6529 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6530 if (!error)
6531 newvp = NFSTOV(np);
6532 }
6533 NVATTR_CLEANUP(&nvattr);
6534
6535 if (!namedattrs)
6536 nfs_dulookup_finish(&dul, dnp, ctx);
6537
6538 /*
6539 * Kludge: Map EEXIST => 0, assuming the EEXIST is really the reply to a
6540 * retransmitted request, provided we can successfully look up the object.
6541 */
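/*
 * Example of the retransmission case this covers (a sketch):
 *	client: CREATE "foo"		-> reply lost
 *	client: CREATE "foo" (retry)	-> NFS4ERR_EXIST
 * The object exists because our own first request created it, so if the
 * lookup below finds an object of the right type, treat the create as
 * having succeeded.
 */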
6542 if ((create_error == EEXIST) || (!create_error && !newvp)) {
6543 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6544 if (!error) {
6545 newvp = NFSTOV(np);
6546 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers))
6547 error = EEXIST;
6548 }
6549 }
6550 if (!busyerror)
6551 nfs_node_clear_busy(dnp);
6552 if (error) {
6553 if (newvp) {
6554 nfs_node_unlock(np);
6555 vnode_put(newvp);
6556 }
6557 } else {
6558 nfs_node_unlock(np);
6559 *npp = np;
6560 }
6561 return (error);
6562 }
6563
6564 int
6565 nfs4_vnop_mknod(
6566 struct vnop_mknod_args /* {
6567 struct vnodeop_desc *a_desc;
6568 vnode_t a_dvp;
6569 vnode_t *a_vpp;
6570 struct componentname *a_cnp;
6571 struct vnode_attr *a_vap;
6572 vfs_context_t a_context;
6573 } */ *ap)
6574 {
6575 nfsnode_t np = NULL;
6576 struct nfsmount *nmp;
6577 int error;
6578
6579 nmp = VTONMP(ap->a_dvp);
6580 if (nfs_mount_gone(nmp))
6581 return (ENXIO);
6582
6583 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
6584 return (EINVAL);
6585 switch (ap->a_vap->va_type) {
6586 case VBLK:
6587 case VCHR:
6588 case VFIFO:
6589 case VSOCK:
6590 break;
6591 default:
6592 return (ENOTSUP);
6593 }
6594
6595 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6596 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
6597 if (!error)
6598 *ap->a_vpp = NFSTOV(np);
6599 return (error);
6600 }
6601
6602 int
6603 nfs4_vnop_mkdir(
6604 struct vnop_mkdir_args /* {
6605 struct vnodeop_desc *a_desc;
6606 vnode_t a_dvp;
6607 vnode_t *a_vpp;
6608 struct componentname *a_cnp;
6609 struct vnode_attr *a_vap;
6610 vfs_context_t a_context;
6611 } */ *ap)
6612 {
6613 nfsnode_t np = NULL;
6614 int error;
6615
6616 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6617 NFDIR, NULL, &np);
6618 if (!error)
6619 *ap->a_vpp = NFSTOV(np);
6620 return (error);
6621 }
6622
6623 int
6624 nfs4_vnop_symlink(
6625 struct vnop_symlink_args /* {
6626 struct vnodeop_desc *a_desc;
6627 vnode_t a_dvp;
6628 vnode_t *a_vpp;
6629 struct componentname *a_cnp;
6630 struct vnode_attr *a_vap;
6631 char *a_target;
6632 vfs_context_t a_context;
6633 } */ *ap)
6634 {
6635 nfsnode_t np = NULL;
6636 int error;
6637
6638 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6639 NFLNK, ap->a_target, &np);
6640 if (!error)
6641 *ap->a_vpp = NFSTOV(np);
6642 return (error);
6643 }
6644
6645 int
6646 nfs4_vnop_link(
6647 struct vnop_link_args /* {
6648 struct vnodeop_desc *a_desc;
6649 vnode_t a_vp;
6650 vnode_t a_tdvp;
6651 struct componentname *a_cnp;
6652 vfs_context_t a_context;
6653 } */ *ap)
6654 {
6655 vfs_context_t ctx = ap->a_context;
6656 vnode_t vp = ap->a_vp;
6657 vnode_t tdvp = ap->a_tdvp;
6658 struct componentname *cnp = ap->a_cnp;
6659 int error = 0, lockerror = ENOENT, status;
6660 struct nfsmount *nmp;
6661 nfsnode_t np = VTONFS(vp);
6662 nfsnode_t tdnp = VTONFS(tdvp);
6663 int nfsvers, numops;
6664 u_int64_t xid, savedxid;
6665 struct nfsm_chain nmreq, nmrep;
6666 struct nfsreq_secinfo_args si;
6667
6668 if (vnode_mount(vp) != vnode_mount(tdvp))
6669 return (EXDEV);
6670
6671 nmp = VTONMP(vp);
6672 if (nfs_mount_gone(nmp))
6673 return (ENXIO);
6674 nfsvers = nmp->nm_vers;
6675 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6676 return (EINVAL);
6677 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6678 return (EINVAL);
6679
6680 /*
6681 * Push all writes to the server, so that the attribute cache
6682 * doesn't get "out of sync" with the server.
6683 * XXX There should be a better way!
6684 */
6685 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
6686
6687 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
6688 return (error);
6689
6690 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6691 nfsm_chain_null(&nmreq);
6692 nfsm_chain_null(&nmrep);
6693
6694 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
6695 numops = 7;
6696 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
6697 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6698 numops--;
6699 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6700 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6701 numops--;
6702 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6703 numops--;
6704 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6705 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
6706 numops--;
6707 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
6708 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6709 numops--;
6710 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6711 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
6712 numops--;
6713 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6714 numops--;
6715 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6716 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6717 nfsm_chain_build_done(error, &nmreq);
6718 nfsm_assert(error, (numops == 0), EPROTO);
6719 nfsmout_if(error);
6720 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
6721
6722 if ((lockerror = nfs_node_lock2(tdnp, np))) {
6723 error = lockerror;
6724 goto nfsmout;
6725 }
6726 nfsm_chain_skip_tag(error, &nmrep);
6727 nfsm_chain_get_32(error, &nmrep, numops);
6728 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6729 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6730 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6731 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
6732 nfsm_chain_check_change_info(error, &nmrep, tdnp);
6733 /* directory attributes: if we don't get them, make sure to invalidate */
6734 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6735 savedxid = xid;
6736 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
6737 if (error)
6738 NATTRINVALIDATE(tdnp);
6739 /* link attributes: if we don't get them, make sure to invalidate */
6740 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6741 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6742 xid = savedxid;
6743 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6744 if (error)
6745 NATTRINVALIDATE(np);
6746 nfsmout:
6747 nfsm_chain_cleanup(&nmreq);
6748 nfsm_chain_cleanup(&nmrep);
6749 if (!lockerror)
6750 tdnp->n_flag |= NMODIFIED;
6751 /* Kludge: Map EEXIST => 0, assuming it is the reply to a retransmitted request. */
6752 if (error == EEXIST)
6753 error = 0;
6754 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
6755 tdnp->n_flag &= ~NNEGNCENTRIES;
6756 cache_purge_negatives(tdvp);
6757 }
6758 if (!lockerror)
6759 nfs_node_unlock2(tdnp, np);
6760 nfs_node_clear_busy2(tdnp, np);
6761 return (error);
6762 }
6763
6764 int
6765 nfs4_vnop_rmdir(
6766 struct vnop_rmdir_args /* {
6767 struct vnodeop_desc *a_desc;
6768 vnode_t a_dvp;
6769 vnode_t a_vp;
6770 struct componentname *a_cnp;
6771 vfs_context_t a_context;
6772 } */ *ap)
6773 {
6774 vfs_context_t ctx = ap->a_context;
6775 vnode_t vp = ap->a_vp;
6776 vnode_t dvp = ap->a_dvp;
6777 struct componentname *cnp = ap->a_cnp;
6778 struct nfsmount *nmp;
6779 int error = 0, namedattrs;
6780 nfsnode_t np = VTONFS(vp);
6781 nfsnode_t dnp = VTONFS(dvp);
6782 struct nfs_dulookup dul;
6783
6784 if (vnode_vtype(vp) != VDIR)
6785 return (EINVAL);
6786
6787 nmp = NFSTONMP(dnp);
6788 if (nfs_mount_gone(nmp))
6789 return (ENXIO);
6790 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6791
6792 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
6793 return (error);
6794
6795 if (!namedattrs) {
6796 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6797 nfs_dulookup_start(&dul, dnp, ctx);
6798 }
6799
6800 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6801 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6802
6803 nfs_name_cache_purge(dnp, np, cnp, ctx);
6804 /* nfs_getattr() will check changed and purge caches */
6805 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6806 if (!namedattrs)
6807 nfs_dulookup_finish(&dul, dnp, ctx);
6808 nfs_node_clear_busy2(dnp, np);
6809
6810 /*
6811 * Kludge: Map ENOENT => 0, assuming it is the reply to a retransmitted request.
6812 */
6813 if (error == ENOENT)
6814 error = 0;
6815 if (!error) {
6816 /*
6817 * remove nfsnode from hash now so we can't accidentally find it
6818 * again if another object gets created with the same filehandle
6819 * before this vnode gets reclaimed
6820 */
6821 lck_mtx_lock(nfs_node_hash_mutex);
6822 if (np->n_hflag & NHHASHED) {
6823 LIST_REMOVE(np, n_hash);
6824 np->n_hflag &= ~NHHASHED;
6825 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
6826 }
6827 lck_mtx_unlock(nfs_node_hash_mutex);
6828 }
6829 return (error);
6830 }
6831
6832 /*
6833 * NFSv4 Named Attributes
6834 *
6835 * Both the extended attributes interface and the named streams interface
6836 * are backed by NFSv4 named attributes. The implementations for both use
6837 * a common set of routines in an attempt to reduce code duplication, to
6838 * increase efficiency, to increase caching of both names and data, and to
6839 * confine the complexity.
6840 *
6841 * Each NFS node caches its named attribute directory's file handle.
6842 * The directory nodes for the named attribute directories are handled
6843 * exactly like regular directories (with a couple minor exceptions).
6844 * Named attribute nodes are also treated as much like regular files as
6845 * possible.
6846 *
6847 * Most of the heavy lifting is done by nfs4_named_attr_get().
6848 */
6849
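/*
 * Illustrative sketch (not part of the original source): the xattr and
 * named-stream vnode ops below all funnel into nfs4_named_attr_get() by
 * packaging the attribute name in a componentname, roughly as follows.
 * (example_named_attr_lookup is a hypothetical helper, not a real XNU
 * function; it assumes np is a referenced nfsnode and ctx a valid context.)
 */
#if 0
static int
example_named_attr_lookup(nfsnode_t np, const char *name, vfs_context_t ctx)
{
struct componentname cn;
nfsnode_t anp = NULL;
int error;

bzero(&cn, sizeof(cn));
cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
cn.cn_namelen = strlen(name);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = MAKEENTRY;

/* look up the named attribute without opening or creating it */
error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE, 0, ctx, &anp, NULL);
if (!error && anp)
vnode_put(NFSTOV(anp)); /* a real caller would use NFSTOV(anp) first */
return (error);
}
#endif
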
6850 /*
6851 * Get the given node's attribute directory node.
6852 * If !fetch, then only return a cached node.
6853 * Otherwise, we will attempt to fetch the node from the server.
6854 * (Note: the node should be marked busy.)
6855 */
6856 nfsnode_t
6857 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
6858 {
6859 nfsnode_t adnp = NULL;
6860 struct nfsmount *nmp;
6861 int error = 0, status, numops;
6862 struct nfsm_chain nmreq, nmrep;
6863 u_int64_t xid;
6864 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
6865 fhandle_t fh;
6866 struct nfs_vattr nvattr;
6867 struct componentname cn;
6868 struct nfsreq rq, *req = &rq;
6869 struct nfsreq_secinfo_args si;
6870
6871 nmp = NFSTONMP(np);
6872 if (nfs_mount_gone(nmp))
6873 return (NULL);
6874 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6875 return (NULL);
6876
6877 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6878 NVATTR_INIT(&nvattr);
6879 nfsm_chain_null(&nmreq);
6880 nfsm_chain_null(&nmrep);
6881
6882 bzero(&cn, sizeof(cn));
6883 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
6884 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
6885 cn.cn_nameiop = LOOKUP;
6886
6887 if (np->n_attrdirfh) {
6888 // XXX can't set parent correctly (to np) yet
6889 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh,
6890 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
6891 if (adnp)
6892 goto nfsmout;
6893 }
6894 if (!fetch) {
6895 error = ENOENT;
6896 goto nfsmout;
6897 }
6898
6899 // PUTFH, OPENATTR, GETATTR
6900 numops = 3;
6901 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
6902 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6903 numops--;
6904 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6905 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
6906 numops--;
6907 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
6908 nfsm_chain_add_32(error, &nmreq, 0);
6909 numops--;
6910 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6911 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6912 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6913 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
6914 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6915 nfsm_chain_build_done(error, &nmreq);
6916 nfsm_assert(error, (numops == 0), EPROTO);
6917 nfsmout_if(error);
6918 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6919 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6920 if (!error)
6921 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6922
6923 nfsm_chain_skip_tag(error, &nmrep);
6924 nfsm_chain_get_32(error, &nmrep, numops);
6925 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6926 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
6927 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6928 nfsmout_if(error);
6929 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6930 nfsmout_if(error);
6931 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
6932 error = ENOENT;
6933 goto nfsmout;
6934 }
6935 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
6936 /* (re)allocate attrdir fh buffer */
6937 if (np->n_attrdirfh)
6938 FREE(np->n_attrdirfh, M_TEMP);
6939 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
6940 }
6941 if (!np->n_attrdirfh) {
6942 error = ENOMEM;
6943 goto nfsmout;
6944 }
6945 /* cache the attrdir fh in the node */
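/* (n_attrdirfh layout: first byte is the fh length, the rest is the fh data) */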
6946 *np->n_attrdirfh = fh.fh_len;
6947 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
6948 /* create node for attrdir */
6949 // XXX can't set parent correctly (to np) yet
6950 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
6951 nfsmout:
6952 NVATTR_CLEANUP(&nvattr);
6953 nfsm_chain_cleanup(&nmreq);
6954 nfsm_chain_cleanup(&nmrep);
6955
6956 if (adnp) {
6957 /* sanity check that this node is an attribute directory */
6958 if (adnp->n_vattr.nva_type != VDIR)
6959 error = EINVAL;
6960 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
6961 error = EINVAL;
6962 nfs_node_unlock(adnp);
6963 if (error)
6964 vnode_put(NFSTOV(adnp));
6965 }
6966 return (error ? NULL : adnp);
6967 }
6968
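/*
 * Illustrative sketch (not part of the original source): callers busy the
 * node around nfs4_named_attr_dir_get() and then treat the result as an
 * ordinary directory vnode (cf. nfs4_named_attr_remove() and
 * nfs4_vnop_listxattr() below):
 */
#if 0
if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
return (error);
adnp = nfs4_named_attr_dir_get(np, 1 /* fetch from server if not cached */, ctx);
nfs_node_clear_busy(np);
if (!adnp)
return (ENOENT);
/* ... use NFSTOV(adnp) ... */
vnode_put(NFSTOV(adnp));
#endif
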
6969 /*
6970 * Get the given node's named attribute node for the name given.
6971 *
6972 * In an effort to increase the performance of named attribute access, we try
6973 * to reduce server requests by doing the following:
6974 *
6975 * - cache the node's named attribute directory file handle in the node
6976 * - maintain a directory vnode for the attribute directory
6977 * - use name cache entries (positive and negative) to speed up lookups
6978 * - optionally open the named attribute (with the given accessMode) in the same RPC
6979 * - combine attribute directory retrieval with the lookup/open RPC
6980 * - optionally prefetch the named attribute's first block of data in the same RPC
6981 *
6982 * Also, in an attempt to reduce the number of copies/variations of this code,
6983 * parts of the RPC building/processing code are conditionalized on what is
6984 * needed for any particular request (openattr, lookup vs. open, read).
6985 *
6986 * Note that because we may not have the attribute directory node when we start
6987 * the lookup/open, we lock both the node and the attribute directory node.
6988 */
6989
6990 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6991 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6992 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6993 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
6994
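/*
 * Illustrative sketch (not part of the original source): how a caller such
 * as nfs4_vnop_setxattr() (below) composes these flags from xattr options:
 */
#if 0
flags = 0;
if (!(options & XATTR_REPLACE))
flags |= NFS_GET_NAMED_ATTR_CREATE; /* may create the attribute */
if (options & XATTR_CREATE)
flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED; /* must not already exist */
if (!isrsrcfork)
flags |= NFS_GET_NAMED_ATTR_TRUNCATE; /* discard any existing data */
#endif
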
6995 int
6996 nfs4_named_attr_get(
6997 nfsnode_t np,
6998 struct componentname *cnp,
6999 uint32_t accessMode,
7000 int flags,
7001 vfs_context_t ctx,
7002 nfsnode_t *anpp,
7003 struct nfs_open_file **nofpp)
7004 {
7005 struct nfsmount *nmp;
7006 int error = 0, open_error = EIO;
7007 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7008 int create, guarded, prefetch, truncate, noopbusy = 0;
7009 int open, status, numops, hadattrdir, negnamecache;
7010 struct nfs_vattr nvattr;
7011 struct vnode_attr vattr;
7012 nfsnode_t adnp = NULL, anp = NULL;
7013 vnode_t avp = NULL;
7014 u_int64_t xid, savedxid = 0;
7015 struct nfsm_chain nmreq, nmrep;
7016 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7017 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7018 nfs_stateid stateid, dstateid;
7019 fhandle_t fh;
7020 struct nfs_open_owner *noop = NULL;
7021 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7022 struct vnop_access_args naa;
7023 thread_t thd;
7024 kauth_cred_t cred;
7025 struct timeval now;
7026 char sbuf[64], *s;
7027 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7028 struct kauth_ace ace;
7029 struct nfsreq rq, *req = &rq;
7030 struct nfsreq_secinfo_args si;
7031
7032 *anpp = NULL;
7033 fh.fh_len = 0;
7034 rflags = delegation = recall = eof = rlen = retlen = 0;
7035 ace.ace_flags = 0;
7036 s = sbuf;
7037 slen = sizeof(sbuf);
7038
7039 nmp = NFSTONMP(np);
7040 if (nfs_mount_gone(nmp))
7041 return (ENXIO);
7042 NVATTR_INIT(&nvattr);
7043 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7044 thd = vfs_context_thread(ctx);
7045 cred = vfs_context_ucred(ctx);
7046 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7047 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7048 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7049 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7050
7051 if (!create) {
7052 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7053 if (error)
7054 return (error);
7055 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7056 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7057 return (ENOATTR);
7058 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7059 /* shouldn't happen... but just be safe */
7060 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7061 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7062 }
7063 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7064 if (open) {
7065 /*
7066 * We're trying to open the file.
7067 * We'll create/open it with the given access mode,
7068 * and set NFS_OPEN_FILE_CREATE.
7069 */
7070 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7071 if (prefetch && guarded)
7072 prefetch = 0; /* no sense prefetching data that can't be there */
7073
7074 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7075 if (!noop)
7076 return (ENOMEM);
7077 }
7078
7079 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7080 return (error);
7081
7082 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7083 hadattrdir = (adnp != NULL);
7084 if (prefetch) {
7085 microuptime(&now);
7086 /* use the special state ID because we don't have a real one to send */
7087 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7088 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7089 }
7090 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7091 nfsm_chain_null(&nmreq);
7092 nfsm_chain_null(&nmrep);
7093
7094 if (hadattrdir) {
7095 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx))))
7096 goto nfsmout;
7097 /* nfs_getattr() will check changed and purge caches */
7098 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7099 nfsmout_if(error);
7100 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7101 switch (error) {
7102 case ENOENT:
7103 /* negative cache entry */
7104 goto nfsmout;
7105 case 0:
7106 /* cache miss */
7107 /* try dir buf cache lookup */
7108 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7109 if (!error && anp) {
7110 /* dir buf cache hit */
7111 *anpp = anp;
7112 error = -1;
7113 }
7114 if (error != -1) /* cache miss */
7115 break;
7116 /* FALLTHROUGH */
7117 case -1:
7118 /* cache hit, not really an error */
7119 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7120 if (!anp && avp)
7121 *anpp = anp = VTONFS(avp);
7122
7123 nfs_node_clear_busy(adnp);
7124 adbusyerror = ENOENT;
7125
7126 /* check for directory access */
7127 naa.a_desc = &vnop_access_desc;
7128 naa.a_vp = NFSTOV(adnp);
7129 naa.a_action = KAUTH_VNODE_SEARCH;
7130 naa.a_context = ctx;
7131
7132 /* compute actual success/failure based on accessibility */
7133 error = nfs_vnop_access(&naa);
7134 /* FALLTHROUGH */
7135 default:
7136 /* we either found it, or hit an error */
7137 if (!error && guarded) {
7138 /* found cached entry but told not to use it */
7139 error = EEXIST;
7140 vnode_put(NFSTOV(anp));
7141 *anpp = anp = NULL;
7142 }
7143 /* we're done if error or we don't need to open */
7144 if (error || !open)
7145 goto nfsmout;
7146 /* no error and we need to open... */
7147 }
7148 }
7149
7150 if (open) {
7151 restart:
7152 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7153 if (error) {
7154 nfs_open_owner_rele(noop);
7155 noop = NULL;
7156 goto nfsmout;
7157 }
7158 inuse = 1;
7159
7160 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7161 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7162 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7163 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7164 error = EIO;
7165 }
7166 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7167 nfs_mount_state_in_use_end(nmp, 0);
7168 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7169 nfs_open_file_destroy(newnofp);
7170 newnofp = NULL;
7171 if (!error)
7172 goto restart;
7173 }
7174 if (!error)
7175 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7176 if (error) {
7177 if (newnofp)
7178 nfs_open_file_destroy(newnofp);
7179 newnofp = NULL;
7180 goto nfsmout;
7181 }
7182 if (anp) {
7183 /*
7184 * We already have the node. So we just need to open
7185 * it - which we may be able to do with a delegation.
7186 */
7187 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7188 if (!error) {
7189 /* open succeeded, so our open file is no longer temporary */
7190 nofp = newnofp;
7191 nofpbusyerror = 0;
7192 newnofp = NULL;
7193 if (nofpp)
7194 *nofpp = nofp;
7195 }
7196 goto nfsmout;
7197 }
7198 }
7199
7200 /*
7201 * We either don't have the attrdir or we didn't find the attribute
7202 * in the name cache, so we need to talk to the server.
7203 *
7204 * If we don't have the attrdir, we'll need to ask the server for that too.
7205 * If the caller is requesting that the attribute be created, we need to
7206 * make sure the attrdir is created.
7207 * The caller may also request that the first block of an existing attribute
7208 * be retrieved at the same time.
7209 */
7210
7211 if (open) {
7212 /* need to mark the open owner busy during the RPC */
7213 if ((error = nfs_open_owner_set_busy(noop, thd)))
7214 goto nfsmout;
7215 noopbusy = 1;
7216 }
7217
7218 /*
7219 * We'd like to get updated post-open/lookup attributes for the
7220 * directory and we may also want to prefetch some data via READ.
7221 * We'd like the READ results to be last so that we can leave the
7222 * data in the mbufs until the end.
7223 *
7224 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7225 */
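/*
 * For reference, the full layout as built below: without a cached attrdir
 * the compound also carries OPENATTR+GETATTR before the LOOKUP/OPEN and a
 * second OPENATTR after the second PUTFH; with prefetch it appends SAVEFH
 * (before the second PUTFH) and RESTOREFH+NVERIFY+READ at the end.
 */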
7226 numops = 5;
7227 if (!hadattrdir)
7228 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7229 if (prefetch)
7230 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7231 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7232 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7233 if (hadattrdir) {
7234 numops--;
7235 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7236 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7237 } else {
7238 numops--;
7239 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7240 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7241 numops--;
7242 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7243 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7244 numops--;
7245 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7246 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7247 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7248 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7249 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7250 }
7251 if (open) {
7252 numops--;
7253 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7254 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7255 nfsm_chain_add_32(error, &nmreq, accessMode);
7256 nfsm_chain_add_32(error, &nmreq, denyMode);
7257 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7258 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7259 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7260 nfsm_chain_add_32(error, &nmreq, create);
7261 if (create) {
7262 nfsm_chain_add_32(error, &nmreq, guarded);
7263 VATTR_INIT(&vattr);
7264 if (truncate)
7265 VATTR_SET(&vattr, va_data_size, 0);
7266 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7267 }
7268 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7269 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7270 } else {
7271 numops--;
7272 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7273 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7274 }
7275 numops--;
7276 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7277 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7278 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7279 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7280 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7281 if (prefetch) {
7282 numops--;
7283 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7284 }
7285 if (hadattrdir) {
7286 numops--;
7287 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7288 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7289 } else {
7290 numops--;
7291 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7292 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7293 numops--;
7294 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7295 nfsm_chain_add_32(error, &nmreq, 0);
7296 }
7297 numops--;
7298 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7299 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7300 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7301 if (prefetch) {
7302 numops--;
7303 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7304 numops--;
7305 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7306 VATTR_INIT(&vattr);
7307 VATTR_SET(&vattr, va_data_size, 0);
7308 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7309 numops--;
7310 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7311 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7312 nfsm_chain_add_64(error, &nmreq, 0);
7313 nfsm_chain_add_32(error, &nmreq, rlen);
7314 }
7315 nfsm_chain_build_done(error, &nmreq);
7316 nfsm_assert(error, (numops == 0), EPROTO);
7317 nfsmout_if(error);
7318 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7319 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7320 if (!error)
7321 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7322
7323 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp))))
7324 error = adlockerror;
7325 savedxid = xid;
7326 nfsm_chain_skip_tag(error, &nmrep);
7327 nfsm_chain_get_32(error, &nmrep, numops);
7328 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7329 if (!hadattrdir) {
7330 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7331 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7332 nfsmout_if(error);
7333 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7334 nfsmout_if(error);
7335 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7336 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7337 /* (re)allocate attrdir fh buffer */
7338 if (np->n_attrdirfh)
7339 FREE(np->n_attrdirfh, M_TEMP);
7340 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
7341 }
7342 if (np->n_attrdirfh) {
7343 /* remember the attrdir fh in the node */
7344 *np->n_attrdirfh = fh.fh_len;
7345 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
7346 /* create busied node for attrdir */
7347 struct componentname cn;
7348 bzero(&cn, sizeof(cn));
7349 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7350 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7351 cn.cn_nameiop = LOOKUP;
7352 // XXX can't set parent correctly (to np) yet
7353 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7354 if (!error) {
7355 adlockerror = 0;
7356 /* set the node busy */
7357 SET(adnp->n_flag, NBUSY);
7358 adbusyerror = 0;
7359 }
7360 /* if no adnp, oh well... */
7361 error = 0;
7362 }
7363 }
7364 NVATTR_CLEANUP(&nvattr);
7365 fh.fh_len = 0;
7366 }
7367 if (open) {
7368 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7369 nfs_owner_seqid_increment(noop, NULL, error);
7370 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7371 nfsm_chain_check_change_info(error, &nmrep, adnp);
7372 nfsm_chain_get_32(error, &nmrep, rflags);
7373 bmlen = NFS_ATTR_BITMAP_LEN;
7374 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7375 nfsm_chain_get_32(error, &nmrep, delegation);
7376 if (!error)
7377 switch (delegation) {
7378 case NFS_OPEN_DELEGATE_NONE:
7379 break;
7380 case NFS_OPEN_DELEGATE_READ:
7381 case NFS_OPEN_DELEGATE_WRITE:
7382 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7383 nfsm_chain_get_32(error, &nmrep, recall);
7384 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit XXX
7385 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7386 /* if we have any trouble accepting the ACE, just invalidate it */
7387 ace_type = ace_flags = ace_mask = len = 0;
7388 nfsm_chain_get_32(error, &nmrep, ace_type);
7389 nfsm_chain_get_32(error, &nmrep, ace_flags);
7390 nfsm_chain_get_32(error, &nmrep, ace_mask);
7391 nfsm_chain_get_32(error, &nmrep, len);
7392 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7393 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7394 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7395 if (!error && (len >= slen)) {
7396 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
7397 if (s)
7398 slen = len+1;
7399 else
7400 ace.ace_flags = 0;
7401 }
7402 if (s)
7403 nfsm_chain_get_opaque(error, &nmrep, len, s);
7404 else
7405 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7406 if (!error && s) {
7407 s[len] = '\0';
7408 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
7409 ace.ace_flags = 0;
7410 }
7411 if (error || !s)
7412 ace.ace_flags = 0;
7413 if (s && (s != sbuf))
7414 FREE(s, M_TEMP);
7415 break;
7416 default:
7417 error = EBADRPC;
7418 break;
7419 }
7420 /* At this point if we have no error, the object was created/opened. */
7421 open_error = error;
7422 } else {
7423 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7424 }
7425 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7426 nfsmout_if(error);
7427 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7428 nfsmout_if(error);
7429 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7430 error = EIO;
7431 goto nfsmout;
7432 }
7433 if (prefetch)
7434 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7435 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7436 if (!hadattrdir)
7437 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7438 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7439 nfsmout_if(error);
7440 xid = savedxid;
7441 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
7442 nfsmout_if(error);
7443
7444 if (open) {
7445 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
7446 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7447 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
7448 if (adnp) {
7449 nfs_node_unlock(adnp);
7450 adlockerror = ENOENT;
7451 }
7452 NVATTR_CLEANUP(&nvattr);
7453 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
7454 nfsmout_if(error);
7455 savedxid = xid;
7456 if ((adlockerror = nfs_node_lock(adnp)))
7457 error = adlockerror;
7458 }
7459 }
7460
7461 nfsmout:
7462 if (open && adnp && !adlockerror) {
7463 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
7464 adnp->n_flag &= ~NNEGNCENTRIES;
7465 cache_purge_negatives(NFSTOV(adnp));
7466 }
7467 adnp->n_flag |= NMODIFIED;
7468 nfs_node_unlock(adnp);
7469 adlockerror = ENOENT;
7470 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7471 }
7472 if (adnp && !adlockerror && (error == ENOENT) &&
7473 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
7474 /* add a negative entry in the name cache */
7475 cache_enter(NFSTOV(adnp), NULL, cnp);
7476 adnp->n_flag |= NNEGNCENTRIES;
7477 }
7478 if (adnp && !adlockerror) {
7479 nfs_node_unlock(adnp);
7480 adlockerror = ENOENT;
7481 }
7482 if (!error && !anp && fh.fh_len) {
7483 /* create the vnode with the filehandle and attributes */
7484 xid = savedxid;
7485 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
7486 if (!error) {
7487 *anpp = anp;
7488 nfs_node_unlock(anp);
7489 }
7490 if (!error && open) {
7491 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
7492 /* After we have a node, add our open file struct to the node */
7493 nofp = newnofp;
7494 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
7495 if (error) {
7496 /* This shouldn't happen, because we passed in a new nofp to use. */
7497 printf("nfs_open_file_find_internal failed! %d\n", error);
7498 nofp = NULL;
7499 } else if (nofp != newnofp) {
7500 /*
7501 * Hmm... an open file struct already exists.
7502 * Mark the existing one busy and merge our open into it.
7503 * Then destroy the one we created.
7504 * Note: there's no chance of an open conflict because the
7505 * open has already been granted.
7506 */
7507 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
7508 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
7509 nofp->nof_stateid = newnofp->nof_stateid;
7510 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
7511 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7512 nfs_open_file_clear_busy(newnofp);
7513 nfs_open_file_destroy(newnofp);
7514 newnofp = NULL;
7515 }
7516 if (!error) {
7517 newnofp = NULL;
7518 nofpbusyerror = 0;
7519 /* mark the node as holding a create-initiated open */
7520 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
7521 nofp->nof_creator = current_thread();
7522 if (nofpp)
7523 *nofpp = nofp;
7524 }
7525 }
7526 }
7527 NVATTR_CLEANUP(&nvattr);
7528 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
7529 if (!error && anp && !recall) {
7530 /* stuff the delegation state in the node */
7531 lck_mtx_lock(&anp->n_openlock);
7532 anp->n_openflags &= ~N_DELEG_MASK;
7533 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7534 anp->n_dstateid = dstateid;
7535 anp->n_dace = ace;
7536 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7537 lck_mtx_lock(&nmp->nm_lock);
7538 if (anp->n_dlink.tqe_next == NFSNOLIST)
7539 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7540 lck_mtx_unlock(&nmp->nm_lock);
7541 }
7542 lck_mtx_unlock(&anp->n_openlock);
7543 } else {
7544 /* give the delegation back */
7545 if (anp) {
7546 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
7547 /* update delegation state and return it */
7548 lck_mtx_lock(&anp->n_openlock);
7549 anp->n_openflags &= ~N_DELEG_MASK;
7550 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7551 anp->n_dstateid = dstateid;
7552 anp->n_dace = ace;
7553 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7554 lck_mtx_lock(&nmp->nm_lock);
7555 if (anp->n_dlink.tqe_next == NFSNOLIST)
7556 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7557 lck_mtx_unlock(&nmp->nm_lock);
7558 }
7559 lck_mtx_unlock(&anp->n_openlock);
7560 /* don't need to send a separate delegreturn for fh */
7561 fh.fh_len = 0;
7562 }
7563 /* return anp's current delegation */
7564 nfs4_delegation_return(anp, 0, thd, cred);
7565 }
7566 if (fh.fh_len) /* return fh's delegation if it wasn't for anp */
7567 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
7568 }
7569 }
7570 if (open) {
7571 if (newnofp) {
7572 /* need to cleanup our temporary nofp */
7573 nfs_open_file_clear_busy(newnofp);
7574 nfs_open_file_destroy(newnofp);
7575 newnofp = NULL;
7576 } else if (nofp && !nofpbusyerror) {
7577 nfs_open_file_clear_busy(nofp);
7578 nofpbusyerror = ENOENT;
7579 }
7580 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
7581 inuse = 0;
7582 nofp = newnofp = NULL;
7583 rflags = delegation = recall = eof = rlen = retlen = 0;
7584 ace.ace_flags = 0;
7585 s = sbuf;
7586 slen = sizeof(sbuf);
7587 nfsm_chain_cleanup(&nmreq);
7588 nfsm_chain_cleanup(&nmrep);
7589 if (anp) {
7590 vnode_put(NFSTOV(anp));
7591 *anpp = anp = NULL;
7592 }
7593 hadattrdir = (adnp != NULL);
7594 if (noopbusy) {
7595 nfs_open_owner_clear_busy(noop);
7596 noopbusy = 0;
7597 }
7598 goto restart;
7599 }
7600 if (noop) {
7601 if (noopbusy) {
7602 nfs_open_owner_clear_busy(noop);
7603 noopbusy = 0;
7604 }
7605 nfs_open_owner_rele(noop);
7606 }
7607 }
7608 if (!error && prefetch && nmrep.nmc_mhead) {
7609 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7610 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
7611 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
7612 nfsm_chain_get_32(error, &nmrep, eof);
7613 nfsm_chain_get_32(error, &nmrep, retlen);
7614 if (!error && anp) {
7615 /*
7616 * There is one potential problem with doing the prefetch.
7617 * Because we don't have the node before we start the RPC, we
7618 * can't have the buffer busy while the READ is performed.
7619 * So there is a chance that other I/O occurred on the same
7620 * range of data while we were performing this RPC. If that
7621 * happens, then it's possible the data we have in the READ
7622 * response is no longer up to date.
7623 * Once we have the node and the buffer, we need to make sure
7624 * that there's no chance we could be putting stale data in
7625 * the buffer.
7626 * So, we check if the range read is dirty or if any I/O may
7627 * have occurred on it while we were performing our RPC.
7628 */
7629 struct nfsbuf *bp = NULL;
7630 int lastpg;
7631 uint32_t pagemask;
7632
7633 retlen = MIN(retlen, rlen);
7634
7635 /* check if node needs size update or invalidation */
7636 if (ISSET(anp->n_flag, NUPDATESIZE))
7637 nfs_data_update_size(anp, 0);
7638 if (!(error = nfs_node_lock(anp))) {
7639 if (anp->n_flag & NNEEDINVALIDATE) {
7640 anp->n_flag &= ~NNEEDINVALIDATE;
7641 nfs_node_unlock(anp);
7642 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
7643 if (!error) /* let's play it safe and just drop the data */
7644 error = EIO;
7645 } else {
7646 nfs_node_unlock(anp);
7647 }
7648 }
7649
7650 /* calculate page mask for the range of data read */
7651 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
7652 pagemask = ((1 << (lastpg + 1)) - 1);
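/*
 * e.g. (assuming 4K pages) retlen = 20480 gives lastpg = (20480-1)/4096 = 4,
 * so pagemask = 0x1f, covering the five pages the READ data touches.
 */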
7653
7654 if (!error)
7655 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
7656 /* don't save the data if dirty or potential I/O conflict */
7657 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
7658 timevalcmp(&anp->n_lastio, &now, <)) {
7659 OSAddAtomic64(1, &nfsstats.read_bios);
7660 CLR(bp->nb_flags, (NB_DONE|NB_ASYNC));
7661 SET(bp->nb_flags, NB_READ);
7662 NFS_BUF_MAP(bp);
7663 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
7664 if (error) {
7665 bp->nb_error = error;
7666 SET(bp->nb_flags, NB_ERROR);
7667 } else {
7668 bp->nb_offio = 0;
7669 bp->nb_endio = rlen;
7670 if ((retlen > 0) && (bp->nb_endio < (int)retlen))
7671 bp->nb_endio = retlen;
7672 if (eof || (retlen == 0)) {
7673 /* zero out the remaining data (up to EOF) */
7674 off_t rpcrem, eofrem, rem;
7675 rpcrem = (rlen - retlen);
7676 eofrem = anp->n_size - (NBOFF(bp) + retlen);
7677 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
7678 if (rem > 0)
7679 bzero(bp->nb_data + retlen, rem);
7680 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
7681 /* ugh... short read ... just invalidate for now... */
7682 SET(bp->nb_flags, NB_INVAL);
7683 }
7684 }
7685 nfs_buf_read_finish(bp);
7686 microuptime(&anp->n_lastio);
7687 }
7688 if (bp)
7689 nfs_buf_release(bp, 1);
7690 }
7691 error = 0; /* ignore any transient error in processing the prefetch */
7692 }
7693 if (adnp && !adbusyerror) {
7694 nfs_node_clear_busy(adnp);
7695 adbusyerror = ENOENT;
7696 }
7697 if (!busyerror) {
7698 nfs_node_clear_busy(np);
7699 busyerror = ENOENT;
7700 }
7701 if (adnp)
7702 vnode_put(NFSTOV(adnp));
7703 if (error && *anpp) {
7704 vnode_put(NFSTOV(*anpp));
7705 *anpp = NULL;
7706 }
7707 nfsm_chain_cleanup(&nmreq);
7708 nfsm_chain_cleanup(&nmrep);
7709 return (error);
7710 }
7711
7712 /*
7713 * Remove a named attribute.
7714 */
7715 int
7716 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
7717 {
7718 nfsnode_t adnp = NULL;
7719 struct nfsmount *nmp;
7720 struct componentname cn;
7721 struct vnop_remove_args vra;
7722 int error, putanp = 0;
7723
7724 nmp = NFSTONMP(np);
7725 if (nfs_mount_gone(nmp))
7726 return (ENXIO);
7727
7728 bzero(&cn, sizeof(cn));
7729 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7730 cn.cn_namelen = strlen(name);
7731 cn.cn_nameiop = DELETE;
7732 cn.cn_flags = 0;
7733
7734 if (!anp) {
7735 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7736 0, ctx, &anp, NULL);
7737 if ((!error && !anp) || (error == ENOATTR))
7738 error = ENOENT;
7739 if (error) {
7740 if (anp) {
7741 vnode_put(NFSTOV(anp));
7742 anp = NULL;
7743 }
7744 goto out;
7745 }
7746 putanp = 1;
7747 }
7748
7749 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7750 goto out;
7751 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
7752 nfs_node_clear_busy(np);
7753 if (!adnp) {
7754 error = ENOENT;
7755 goto out;
7756 }
7757
7758 vra.a_desc = &vnop_remove_desc;
7759 vra.a_dvp = NFSTOV(adnp);
7760 vra.a_vp = NFSTOV(anp);
7761 vra.a_cnp = &cn;
7762 vra.a_flags = 0;
7763 vra.a_context = ctx;
7764 error = nfs_vnop_remove(&vra);
7765 out:
7766 if (adnp)
7767 vnode_put(NFSTOV(adnp));
7768 if (putanp)
7769 vnode_put(NFSTOV(anp));
7770 return (error);
7771 }
7772
7773 int
7774 nfs4_vnop_getxattr(
7775 struct vnop_getxattr_args /* {
7776 struct vnodeop_desc *a_desc;
7777 vnode_t a_vp;
7778 const char * a_name;
7779 uio_t a_uio;
7780 size_t *a_size;
7781 int a_options;
7782 vfs_context_t a_context;
7783 } */ *ap)
7784 {
7785 vfs_context_t ctx = ap->a_context;
7786 struct nfsmount *nmp;
7787 struct nfs_vattr nvattr;
7788 struct componentname cn;
7789 nfsnode_t anp;
7790 int error = 0, isrsrcfork;
7791
7792 nmp = VTONMP(ap->a_vp);
7793 if (nfs_mount_gone(nmp))
7794 return (ENXIO);
7795
7796 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7797 return (ENOTSUP);
7798 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
7799 if (error)
7800 return (error);
7801 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7802 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7803 return (ENOATTR);
7804
7805 bzero(&cn, sizeof(cn));
7806 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
7807 cn.cn_namelen = strlen(ap->a_name);
7808 cn.cn_nameiop = LOOKUP;
7809 cn.cn_flags = MAKEENTRY;
7810
7811 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7812 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7813
7814 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7815 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
7816 if ((!error && !anp) || (error == ENOENT))
7817 error = ENOATTR;
7818 if (!error) {
7819 if (ap->a_uio)
7820 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
7821 else
7822 *ap->a_size = anp->n_size;
7823 }
7824 if (anp)
7825 vnode_put(NFSTOV(anp));
7826 return (error);
7827 }
7828
7829 int
7830 nfs4_vnop_setxattr(
7831 struct vnop_setxattr_args /* {
7832 struct vnodeop_desc *a_desc;
7833 vnode_t a_vp;
7834 const char * a_name;
7835 uio_t a_uio;
7836 int a_options;
7837 vfs_context_t a_context;
7838 } */ *ap)
7839 {
7840 vfs_context_t ctx = ap->a_context;
7841 int options = ap->a_options;
7842 uio_t uio = ap->a_uio;
7843 const char *name = ap->a_name;
7844 struct nfsmount *nmp;
7845 struct componentname cn;
7846 nfsnode_t anp = NULL;
7847 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
7848 #define FINDERINFOSIZE 32
7849 uint8_t finfo[FINDERINFOSIZE];
7850 uint32_t *finfop;
7851 struct nfs_open_file *nofp = NULL;
7852 char uio_buf [ UIO_SIZEOF(1) ];
7853 uio_t auio;
7854 struct vnop_write_args vwa;
7855
7856 nmp = VTONMP(ap->a_vp);
7857 if (nfs_mount_gone(nmp))
7858 return (ENXIO);
7859
7860 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7861 return (ENOTSUP);
7862
7863 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE))
7864 return (EINVAL);
7865
7866 /* XXX limitation based on need to back up uio on short write */
7867 if (uio_iovcnt(uio) > 1) {
7868 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7869 return (EINVAL);
7870 }
7871
7872 bzero(&cn, sizeof(cn));
7873 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7874 cn.cn_namelen = strlen(name);
7875 cn.cn_nameiop = CREATE;
7876 cn.cn_flags = MAKEENTRY;
7877
7878 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
7879 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7880 if (!isrsrcfork)
7881 uio_setoffset(uio, 0);
7882 if (isfinderinfo) {
7883 if (uio_resid(uio) != sizeof(finfo))
7884 return (ERANGE);
7885 error = uiomove((char*)&finfo, sizeof(finfo), uio);
7886 if (error)
7887 return (error);
7888 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7889 empty = 1;
7890 for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++)
7891 if (finfop[i]) {
7892 empty = 0;
7893 break;
7894 }
7895 if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) {
7896 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7897 if (error == ENOENT)
7898 error = 0;
7899 return (error);
7900 }
7901 /* first, let's see if we get a create/replace error */
7902 }
7903
7904 /*
7905 * create/open the xattr
7906 *
7907 * We need to make sure not to create it if XATTR_REPLACE.
7908 * For all xattrs except the resource fork, we also want to
7909 * truncate the xattr to remove any current data. We'll do
7910 * that by setting the size to 0 on create/open.
7911 */
7912 flags = 0;
7913 if (!(options & XATTR_REPLACE))
7914 flags |= NFS_GET_NAMED_ATTR_CREATE;
7915 if (options & XATTR_CREATE)
7916 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
7917 if (!isrsrcfork)
7918 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
7919
7920 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
7921 flags, ctx, &anp, &nofp);
7922 if (!error && !anp)
7923 error = ENOATTR;
7924 if (error)
7925 goto out;
7926 /* grab the open state from the get/create/open */
7927 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
7928 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
7929 nofp->nof_creator = NULL;
7930 nfs_open_file_clear_busy(nofp);
7931 }
7932
7933 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7934 if (isfinderinfo && empty)
7935 goto doclose;
7936
7937 /*
7938 * Write the data out and flush.
7939 *
7940 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7941 */
7942 vwa.a_desc = &vnop_write_desc;
7943 vwa.a_vp = NFSTOV(anp);
7944 vwa.a_uio = NULL;
7945 vwa.a_ioflag = 0;
7946 vwa.a_context = ctx;
7947 if (isfinderinfo) {
7948 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
7949 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
7950 vwa.a_uio = auio;
7951 } else if (uio_resid(uio) > 0) {
7952 vwa.a_uio = uio;
7953 }
7954 if (vwa.a_uio) {
7955 error = nfs_vnop_write(&vwa);
7956 if (!error)
7957 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
7958 }
7959 doclose:
7960 /* Close the xattr. */
7961 if (nofp) {
7962 int busyerror = nfs_open_file_set_busy(nofp, NULL);
7963 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
7964 if (!busyerror)
7965 nfs_open_file_clear_busy(nofp);
7966 }
7967 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
7968 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7969 if (error == ENOENT)
7970 error = 0;
7971 }
7972 if (!error)
7973 error = closeerror;
7974 out:
7975 if (anp)
7976 vnode_put(NFSTOV(anp));
7977 if (error == ENOENT)
7978 error = ENOATTR;
7979 return (error);
7980 }
7981
7982 int
7983 nfs4_vnop_removexattr(
7984 struct vnop_removexattr_args /* {
7985 struct vnodeop_desc *a_desc;
7986 vnode_t a_vp;
7987 const char * a_name;
7988 int a_options;
7989 vfs_context_t a_context;
7990 } */ *ap)
7991 {
7992 struct nfsmount *nmp = VTONMP(ap->a_vp);
7993 int error;
7994
7995 if (nfs_mount_gone(nmp))
7996 return (ENXIO);
7997 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7998 return (ENOTSUP);
7999
8000 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
8001 if (error == ENOENT)
8002 error = ENOATTR;
8003 return (error);
8004 }
8005
8006 int
8007 nfs4_vnop_listxattr(
8008 struct vnop_listxattr_args /* {
8009 struct vnodeop_desc *a_desc;
8010 vnode_t a_vp;
8011 uio_t a_uio;
8012 size_t *a_size;
8013 int a_options;
8014 vfs_context_t a_context;
8015 } */ *ap)
8016 {
8017 vfs_context_t ctx = ap->a_context;
8018 nfsnode_t np = VTONFS(ap->a_vp);
8019 uio_t uio = ap->a_uio;
8020 nfsnode_t adnp = NULL;
8021 struct nfsmount *nmp;
8022 int error, done, i;
8023 struct nfs_vattr nvattr;
8024 uint64_t cookie, nextcookie, lbn = 0;
8025 struct nfsbuf *bp = NULL;
8026 struct nfs_dir_buf_header *ndbhp;
8027 struct direntry *dp;
8028
8029 nmp = VTONMP(ap->a_vp);
8030 if (nfs_mount_gone(nmp))
8031 return (ENXIO);
8032
8033 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8034 return (ENOTSUP);
8035
8036 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8037 if (error)
8038 return (error);
8039 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8040 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8041 return (0);
8042
8043 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
8044 return (error);
8045 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8046 nfs_node_clear_busy(np);
8047 if (!adnp)
8048 goto out;
8049
8050 if ((error = nfs_node_lock(adnp)))
8051 goto out;
8052
8053 if (adnp->n_flag & NNEEDINVALIDATE) {
8054 adnp->n_flag &= ~NNEEDINVALIDATE;
8055 nfs_invaldir(adnp);
8056 nfs_node_unlock(adnp);
8057 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8058 if (!error)
8059 error = nfs_node_lock(adnp);
8060 if (error)
8061 goto out;
8062 }
8063
8064 /*
8065 * check for need to invalidate when (re)starting at beginning
8066 */
8067 if (adnp->n_flag & NMODIFIED) {
8068 nfs_invaldir(adnp);
8069 nfs_node_unlock(adnp);
8070 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1)))
8071 goto out;
8072 } else {
8073 nfs_node_unlock(adnp);
8074 }
8075 /* nfs_getattr() will check changed and purge caches */
8076 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED)))
8077 goto out;
8078
8079 if (uio && (uio_resid(uio) == 0))
8080 goto out;
8081
8082 done = 0;
8083 nextcookie = lbn = 0;
8084
8085 while (!error && !done) {
8086 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8087 cookie = nextcookie;
8088 getbuffer:
8089 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8090 if (error)
8091 goto out;
8092 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8093 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8094 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8095 ndbhp->ndbh_flags = 0;
8096 ndbhp->ndbh_count = 0;
8097 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8098 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8099 }
8100 error = nfs_buf_readdir(bp, ctx);
8101 if (error == NFSERR_DIRBUFDROPPED)
8102 goto getbuffer;
8103 if (error)
8104 nfs_buf_release(bp, 1);
8105 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8106 if (!nfs_node_lock(adnp)) {
8107 nfs_invaldir(adnp);
8108 nfs_node_unlock(adnp);
8109 }
8110 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8111 if (error == NFSERR_BAD_COOKIE)
8112 error = ENOENT;
8113 }
8114 if (error)
8115 goto out;
8116 }
8117
8118 /* go through all the entries copying/counting */
8119 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
8120 for (i=0; i < ndbhp->ndbh_count; i++) {
8121 if (!xattr_protected(dp->d_name)) {
8122 if (uio == NULL) {
8123 *ap->a_size += dp->d_namlen + 1;
8124 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8125 error = ERANGE;
8126 } else {
8127 error = uiomove(dp->d_name, dp->d_namlen+1, uio);
8128 if (error && (error != EFAULT))
8129 error = ERANGE;
8130 }
8131 }
8132 nextcookie = dp->d_seekoff;
8133 dp = NFS_DIRENTRY_NEXT(dp);
8134 }
8135
8136 if (i == ndbhp->ndbh_count) {
8137 /* hit end of buffer, move to next buffer */
8138 lbn = nextcookie;
8139 /* if we also hit EOF, we're done */
8140 if (ISSET(ndbhp->ndbh_flags, NDB_EOF))
8141 done = 1;
8142 }
8143 if (!error && !done && (nextcookie == cookie)) {
8144 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8145 error = EIO;
8146 }
8147 nfs_buf_release(bp, 1);
8148 }
8149 out:
8150 if (adnp)
8151 vnode_put(NFSTOV(adnp));
8152 return (error);
8153 }
8154
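/*
 * Illustrative sketch (not part of the original source): the directory-buffer
 * walk in nfs4_vnop_listxattr() iterates entries via the header/direntry
 * macros, which in isolation looks like:
 */
#if 0
struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
struct direntry *dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
int i;
for (i = 0; i < ndbhp->ndbh_count; i++) {
/* dp->d_name, dp->d_namlen, and dp->d_seekoff are valid here */
dp = NFS_DIRENTRY_NEXT(dp);
}
#endif
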
8155 #if NAMEDSTREAMS
8156 int
8157 nfs4_vnop_getnamedstream(
8158 struct vnop_getnamedstream_args /* {
8159 struct vnodeop_desc *a_desc;
8160 vnode_t a_vp;
8161 vnode_t *a_svpp;
8162 const char *a_name;
8163 enum nsoperation a_operation;
8164 int a_flags;
8165 vfs_context_t a_context;
8166 } */ *ap)
8167 {
8168 vfs_context_t ctx = ap->a_context;
8169 struct nfsmount *nmp;
8170 struct nfs_vattr nvattr;
8171 struct componentname cn;
8172 nfsnode_t anp;
8173 int error = 0;
8174
8175 nmp = VTONMP(ap->a_vp);
8176 if (nfs_mount_gone(nmp))
8177 return (ENXIO);
8178
8179 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8180 return (ENOTSUP);
8181 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8182 if (error)
8183 return (error);
8184 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8185 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8186 return (ENOATTR);
8187
8188 bzero(&cn, sizeof(cn));
8189 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8190 cn.cn_namelen = strlen(ap->a_name);
8191 cn.cn_nameiop = LOOKUP;
8192 cn.cn_flags = MAKEENTRY;
8193
8194 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8195 0, ctx, &anp, NULL);
8196 if ((!error && !anp) || (error == ENOENT))
8197 error = ENOATTR;
8198 if (!error && anp)
8199 *ap->a_svpp = NFSTOV(anp);
8200 else if (anp)
8201 vnode_put(NFSTOV(anp));
8202 return (error);
8203 }
8204
8205 int
8206 nfs4_vnop_makenamedstream(
8207 struct vnop_makenamedstream_args /* {
8208 struct vnodeop_desc *a_desc;
8209 vnode_t *a_svpp;
8210 vnode_t a_vp;
8211 const char *a_name;
8212 int a_flags;
8213 vfs_context_t a_context;
8214 } */ *ap)
8215 {
8216 vfs_context_t ctx = ap->a_context;
8217 struct nfsmount *nmp;
8218 struct componentname cn;
8219 nfsnode_t anp;
8220 int error = 0;
8221
8222 nmp = VTONMP(ap->a_vp);
8223 if (nfs_mount_gone(nmp))
8224 return (ENXIO);
8225
8226 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8227 return (ENOTSUP);
8228
8229 bzero(&cn, sizeof(cn));
8230 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8231 cn.cn_namelen = strlen(ap->a_name);
8232 cn.cn_nameiop = CREATE;
8233 cn.cn_flags = MAKEENTRY;
8234
8235 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8236 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8237 if ((!error && !anp) || (error == ENOENT))
8238 error = ENOATTR;
8239 if (!error && anp)
8240 *ap->a_svpp = NFSTOV(anp);
8241 else if (anp)
8242 vnode_put(NFSTOV(anp));
8243 return (error);
8244 }
8245
8246 int
8247 nfs4_vnop_removenamedstream(
8248 struct vnop_removenamedstream_args /* {
8249 struct vnodeop_desc *a_desc;
8250 vnode_t a_vp;
8251 vnode_t a_svp;
8252 const char *a_name;
8253 int a_flags;
8254 vfs_context_t a_context;
8255 } */ *ap)
8256 {
8257 struct nfsmount *nmp = VTONMP(ap->a_vp);
8258 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8259 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8260
8261 if (nfs_mount_gone(nmp))
8262 return (ENXIO);
8263
8264 /*
8265 * Given that a_svp is a named stream, checking for
8266 * named attribute support is kinda pointless.
8267 */
8268 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8269 return (ENOTSUP);
8270
8271 return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context));
8272 }
8273
8274 #endif