/*
 * Copyright (c) 2006-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

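/*
 * NFS V4 ACCESS RPC: ask the server which of the requested access
 * rights are allowed, cache the result in the node's per-uid access
 * cache slots, and pass the server's answer back to the caller.
 */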
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (0);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
			vfs_context_thread(ctx), vfs_context_ucred(ctx),
			&si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE);
		else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD))
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		if (dvp != NULLVP)
			vnode_put(dvp);
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE))
		access_result |= NFS_ACCESS_DELETE;
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

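/*
 * NFS V4 GETATTR RPC: fetch attributes for the given file handle
 * (optionally including the ACL) with a PUTFH+GETATTR compound.
 */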
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return (0);
	}

	if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;

	if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls)
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
			vfs_context_thread(ctx), vfs_context_ucred(ctx),
			NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

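/*
 * NFS V4 READLINK RPC: read the contents of a symbolic link into the
 * caller's buffer, updating the node's cached attributes as we go.
 */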
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp))
			len = np->n_size;
		else
			len = *buflenp - 1;
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error)
		*buflenp = len;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

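/*
 * Initiate an asynchronous NFS V4 READ RPC for the given range,
 * using whatever open/lock stateid currently applies to the node.
 */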
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

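/*
 * Finish an asynchronous READ RPC: parse the reply, copy the data
 * into the caller's uio, and report the EOF status.
 */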
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	if (eofp) {
		if (!eof && !retlen)
			eof = 1;
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

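/*
 * Initiate an asynchronous NFS V4 WRITE RPC. For async mounts,
 * sync write requests are downgraded to unstable writes.
 */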
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		iomode = NFS_WRITE_UNSTABLE;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error)
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}

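/*
 * Finish an asynchronous WRITE RPC: parse the reply, note the commit
 * level and write verifier, and update the mount's verifier if it changed.
 */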
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp))
		error = ENXIO;
	if (!error && (lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0)
		error = NFSERR_IO;
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp)
		*wverfp = wverf;
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		committed = NFS_WRITE_FILESYNC;
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)
		microuptime(&np->n_lastio);
	return (error);
}

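/*
 * NFS V4 REMOVE RPC: remove the named entry from the given directory,
 * retrying if the server is still in its grace period.
 */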
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(dnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
		goto restart;
	}

	return (remove_error);
}

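/*
 * NFS V4 RENAME RPC: rename an entry from one directory to another in
 * a single compound, refreshing both directories' attributes.
 */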
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(tdnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(fdnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return (error);
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp)))
		return (lockerror);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i=0; i < ndbhp->ndbh_count-1; i++)
			dp = NFS_DIRENTRY_NEXT(dp);
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh.fh_len = 0;
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen+1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen+1);
		if (dnp->n_parent)
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		else
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {

		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus)
			microuptime(&now);

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name)-1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen)
				nfsm_chain_adv(error, &nmrep,
					nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
				dp->d_fileno = nvattrp->nva_fileid;
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen+1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen+1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0)
				bzero(padstart, padlen);
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp)
		nfs_buf_release(bp, 0);
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
}

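/*
 * Initiate an asynchronous NFS V4 LOOKUP (or LOOKUPP for "..") RPC,
 * also requesting the file handle and attributes of the result.
 */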
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot)
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's')))
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
			vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}


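/*
 * Finish an asynchronous LOOKUP RPC: parse the directory attributes,
 * the new file handle, and the looked-up node's attributes. A MOVED
 * or INVAL error is mapped to a referral trigger.
 */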
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
		isdotdot = 1;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp)
		*xidp = xid;
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL)
			error = 0;
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count)
				nmp->nm_auth = sec.flavors[0];
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return (error);
}

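/*
 * NFS V4 COMMIT RPC: commit previously written (unstable) data and
 * check the returned write verifier against the verifier the data
 * was written with.
 */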
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
		return (0);
	nfsvers = nmp->nm_vers;

	if (count > UINT32_MAX)
		count32 = 0;
	else
		count32 = count;

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
			current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf)
		nmp->nm_verf = newwverf;
	if (wverf != newwverf)
		error = NFSERR_STALEWRITEVERF;
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

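/*
 * Fetch "pathconf"-style file system information for this node (max
 * link count, max name length, case sensitivity, etc.) via GETATTR.
 */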
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	if (!error)
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	if (!lockerror)
		nfs_node_unlock(np);
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}

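/*
 * NFS V4 getattr vnode op: get the (possibly cached) NFS attributes
 * and translate them into the caller's vnode_attr structure.
 */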
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp))
		return (ENXIO);
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls)
		ngaflags |= NGA_ACL;
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error)
		return (error);

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
			VATTR_RETURN(vap, va_mode, 0777);
		else
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	     NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	     (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED))
			flags |= SF_ARCHIVED;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN))
			flags |= UF_HIDDEN;
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
		VATTR_RETURN(vap, va_type, nva.nva_type);
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
		VATTR_RETURN(vap, va_filerev, nva.nva_change);

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return (error);
}

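/*
 * NFS V4 SETATTR RPC: push attribute changes to the server. If the
 * server rejects an ACL+mode combination, retry with just the ACL.
 */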
int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
	uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
	nfs_stateid stateid;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp))
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
		return (EINVAL);

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags)
			return (EINVAL);	/* return EINVAL if other attributes also set */
		else
			return (ENOTSUP);	/* return ENOTSUP for chflags(2) */
	}

	/* don't bother requesting some changes if they don't look like they are changing */
	if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid))
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid))
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid))
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
	if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid))
		VATTR_CLEAR_ACTIVE(vap, va_guuid);

tryagain:
	/* do nothing if no attributes will be sent */
	nfs_vattr_set_bitmap(nmp, bitmap, vap);
	if (!bitmap[0] && !bitmap[1])
		return (0);

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
	 * need to invalidate any cached ACL. And if we had an ACL cached,
	 * we might as well also fetch the new value.
	 */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
	    NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
		if (NACLVALID(np))
			NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
		NACLINVALIDATE(np);
	}

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
	if (VATTR_IS_ACTIVE(vap, va_data_size))
		nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
	else
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
	nfsmout_if(error == EBADRPC);
	setattr_error = error;
	error = 0;
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
	if (!error) {
		if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
			microuptime(&np->n_lastio);
		nfs_vattr_set_supported(setbitmap, vap);
		error = setattr_error;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error)
		NATTRINVALIDATE(np);
	/*
	 * We just changed the attributes and we want to make sure that we
	 * see the latest attributes. Get the next XID. If it's not the
	 * next XID after the SETATTR XID, then it's possible that another
	 * RPC was in flight at the same time and it might put stale attributes
	 * in the cache. In that case, we invalidate the attributes and set
	 * the attribute cache XID to guarantee that newer attributes will
	 * get loaded next.
	 */
	nextxid = 0;
	nfs_get_xid(&nextxid);
	if (nextxid != (xid + 1)) {
		np->n_xid = nextxid;
		NATTRINVALIDATE(np);
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
1616 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1617 /*
1618 * Some servers may not like ACL/mode combos that get sent.
1619 * If it looks like that's what the server choked on, try setting
1620 * just the ACL and not the mode (unless it looks like everything
1621 * but mode was already successfully set).
1622 */
1623 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1624 ((bitmap[1] & (setbitmap[1]|NFS_FATTR_MODE)) != bitmap[1])) {
1625 VATTR_CLEAR_ACTIVE(vap, va_mode);
1626 error = 0;
1627 goto tryagain;
1628 }
1629 }
1630 return (error);
1631 }
1632
1633 /*
1634 * Wait for any pending recovery to complete.
1635 */
1636 int
1637 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1638 {
1639 struct timespec ts = { 1, 0 };
1640 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1641
1642 lck_mtx_lock(&nmp->nm_lock);
1643 while (nmp->nm_state & NFSSTA_RECOVER) {
1644 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1645 break;
1646 nfs_mount_sock_thread_wake(nmp);
1647 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1648 slpflag = 0;
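/* (only the first sleep is interruptible via PCATCH; later iterations rely on the nfs_sigintr() check above) */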
1649 }
1650 lck_mtx_unlock(&nmp->nm_lock);
1651
1652 return (error);
1653 }
1654
1655 /*
1656 * We're about to use/manipulate an NFS mount's open/lock state.
1657 * Wait for any pending state recovery to complete, then
1658 * mark the state as being in use (which will hold off
1659 * the recovery thread until we're done).
1660 */
1661 int
1662 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1663 {
1664 struct timespec ts = { 1, 0 };
1665 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1666
1667 if (nfs_mount_gone(nmp))
1668 return (ENXIO);
1669 lck_mtx_lock(&nmp->nm_lock);
1670 if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) {
1671 lck_mtx_unlock(&nmp->nm_lock);
1672 return (ENXIO);
1673 }
1674 while (nmp->nm_state & NFSSTA_RECOVER) {
1675 if ((error = nfs_sigintr(nmp, NULL, thd, 1)))
1676 break;
1677 nfs_mount_sock_thread_wake(nmp);
1678 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1679 slpflag = 0;
1680 }
1681 if (!error)
1682 nmp->nm_stateinuse++;
1683 lck_mtx_unlock(&nmp->nm_lock);
1684
1685 return (error);
1686 }
1687
1688 /*
1689 * We're done using/manipulating the NFS mount's open/lock
1690 * state. If the given error indicates that recovery should
1691 * be performed, we'll initiate recovery.
1692 */
1693 int
1694 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1695 {
1696 int restart = nfs_mount_state_error_should_restart(error);
1697
1698 if (nfs_mount_gone(nmp))
1699 return (restart);
1700 lck_mtx_lock(&nmp->nm_lock);
1701 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1702 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1703 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1704 nfs_need_recover(nmp, error);
1705 }
1706 if (nmp->nm_stateinuse > 0)
1707 nmp->nm_stateinuse--;
1708 else
1709 panic("NFS mount state in use count underrun");
1710 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
1711 wakeup(&nmp->nm_stateinuse);
1712 lck_mtx_unlock(&nmp->nm_lock);
1713 if (error == NFSERR_GRACE)
1714 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
1715
1716 return (restart);
1717 }
1718
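/*
 * Canonical caller pattern for the two functions above (a sketch;
 * nfs_vnop_mmap() below follows this shape):
 *
 *	restart:
 *		if ((error = nfs_mount_state_in_use_start(nmp, thd)))
 *			return (error);
 *		... use/update open or lock state ...
 *		if (nfs_mount_state_in_use_end(nmp, error))
 *			goto restart;
 */
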
1719 /*
1720 * Does the error mean we should restart/redo a state-related operation?
1721 */
1722 int
1723 nfs_mount_state_error_should_restart(int error)
1724 {
1725 switch (error) {
1726 case NFSERR_STALE_STATEID:
1727 case NFSERR_STALE_CLIENTID:
1728 case NFSERR_ADMIN_REVOKED:
1729 case NFSERR_EXPIRED:
1730 case NFSERR_OLD_STATEID:
1731 case NFSERR_BAD_STATEID:
1732 case NFSERR_GRACE:
1733 return (1);
1734 }
1735 return (0);
1736 }
1737
1738 /*
1739 * In some cases we may want to limit how many times we restart a
1740 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1741 * Base the limit on the lease (as long as it's not too short).
1742 */
1743 uint
1744 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1745 {
1746 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1747 }
1748
1749 /*
1750 * Does the error mean we probably lost a delegation?
1751 */
1752 int
1753 nfs_mount_state_error_delegation_lost(int error)
1754 {
1755 switch (error) {
1756 case NFSERR_STALE_STATEID:
1757 case NFSERR_ADMIN_REVOKED:
1758 case NFSERR_EXPIRED:
1759 case NFSERR_OLD_STATEID:
1760 case NFSERR_BAD_STATEID:
1761 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1762 return (1);
1763 }
1764 return (0);
1765 }
1766
1767
1768 /*
1769 * Mark an NFS node's open state as busy.
1770 */
1771 int
1772 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1773 {
1774 struct nfsmount *nmp;
1775 struct timespec ts = {2, 0};
1776 int error = 0, slpflag;
1777
1778 nmp = NFSTONMP(np);
1779 if (nfs_mount_gone(nmp))
1780 return (ENXIO);
1781 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1782
1783 lck_mtx_lock(&np->n_openlock);
1784 while (np->n_openflags & N_OPENBUSY) {
1785 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1786 break;
1787 np->n_openflags |= N_OPENWANT;
1788 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1789 slpflag = 0;
1790 }
1791 if (!error)
1792 np->n_openflags |= N_OPENBUSY;
1793 lck_mtx_unlock(&np->n_openlock);
1794
1795 return (error);
1796 }
1797
1798 /*
1799 * Clear an NFS node's open state busy flag and wake up
1800 * anyone wanting it.
1801 */
1802 void
1803 nfs_open_state_clear_busy(nfsnode_t np)
1804 {
1805 int wanted;
1806
1807 lck_mtx_lock(&np->n_openlock);
1808 if (!(np->n_openflags & N_OPENBUSY))
1809 panic("nfs_open_state_clear_busy");
1810 wanted = (np->n_openflags & N_OPENWANT);
1811 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1812 lck_mtx_unlock(&np->n_openlock);
1813 if (wanted)
1814 wakeup(&np->n_openflags);
1815 }
1816
1817 /*
1818 * Search a mount's open owner list for the owner for this credential.
1819 * If not found and "alloc" is set, then allocate a new one.
1820 */
1821 struct nfs_open_owner *
1822 nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1823 {
1824 uid_t uid = kauth_cred_getuid(cred);
1825 struct nfs_open_owner *noop, *newnoop = NULL;
1826
1827 tryagain:
1828 lck_mtx_lock(&nmp->nm_lock);
1829 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1830 if (kauth_cred_getuid(noop->noo_cred) == uid)
1831 break;
1832 }
1833
1834 if (!noop && !newnoop && alloc) {
1835 lck_mtx_unlock(&nmp->nm_lock);
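/* Drop the mount lock to allocate, then retry the search in case another thread linked an owner for this uid in the meantime; a losing allocation is destroyed below. */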
1836 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1837 if (!newnoop)
1838 return (NULL);
1839 bzero(newnoop, sizeof(*newnoop));
1840 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1841 newnoop->noo_mount = nmp;
1842 kauth_cred_ref(cred);
1843 newnoop->noo_cred = cred;
1844 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1845 TAILQ_INIT(&newnoop->noo_opens);
1846 goto tryagain;
1847 }
1848 if (!noop && newnoop) {
1849 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1850 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1851 noop = newnoop;
1852 }
1853 lck_mtx_unlock(&nmp->nm_lock);
1854
1855 if (newnoop && (noop != newnoop))
1856 nfs_open_owner_destroy(newnoop);
1857
1858 if (noop)
1859 nfs_open_owner_ref(noop);
1860
1861 return (noop);
1862 }
1863
1864 /*
1865 * destroy an open owner that's no longer needed
1866 */
1867 void
1868 nfs_open_owner_destroy(struct nfs_open_owner *noop)
1869 {
1870 if (noop->noo_cred)
1871 kauth_cred_unref(&noop->noo_cred);
1872 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1873 FREE(noop, M_TEMP);
1874 }
1875
1876 /*
1877 * acquire a reference count on an open owner
1878 */
1879 void
1880 nfs_open_owner_ref(struct nfs_open_owner *noop)
1881 {
1882 lck_mtx_lock(&noop->noo_lock);
1883 noop->noo_refcnt++;
1884 lck_mtx_unlock(&noop->noo_lock);
1885 }
1886
1887 /*
1888 * drop a reference count on an open owner and destroy it if
1889 * it is no longer referenced and no longer on the mount's list.
1890 */
1891 void
1892 nfs_open_owner_rele(struct nfs_open_owner *noop)
1893 {
1894 lck_mtx_lock(&noop->noo_lock);
1895 if (noop->noo_refcnt < 1)
1896 panic("nfs_open_owner_rele: no refcnt");
1897 noop->noo_refcnt--;
1898 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1899 panic("nfs_open_owner_rele: busy");
1900 /* XXX we may potentially want to clean up idle/unused open owner structures */
1901 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1902 lck_mtx_unlock(&noop->noo_lock);
1903 return;
1904 }
1905 /* owner is no longer referenced or linked to mount, so destroy it */
1906 lck_mtx_unlock(&noop->noo_lock);
1907 nfs_open_owner_destroy(noop);
1908 }
1909
1910 /*
1911 * Mark an open owner as busy because we are about to
1912 * start an operation that uses and updates open owner state.
1913 */
1914 int
1915 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1916 {
1917 struct nfsmount *nmp;
1918 struct timespec ts = {2, 0};
1919 int error = 0, slpflag;
1920
1921 nmp = noop->noo_mount;
1922 if (nfs_mount_gone(nmp))
1923 return (ENXIO);
1924 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1925
1926 lck_mtx_lock(&noop->noo_lock);
1927 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1928 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1929 break;
1930 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1931 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
1932 slpflag = 0;
1933 }
1934 if (!error)
1935 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1936 lck_mtx_unlock(&noop->noo_lock);
1937
1938 return (error);
1939 }
1940
1941 /*
1942 * Clear the busy flag on an open owner and wake up anyone waiting
1943 * to mark it busy.
1944 */
1945 void
1946 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1947 {
1948 int wanted;
1949
1950 lck_mtx_lock(&noop->noo_lock);
1951 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1952 panic("nfs_open_owner_clear_busy");
1953 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1954 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1955 lck_mtx_unlock(&noop->noo_lock);
1956 if (wanted)
1957 wakeup(noop);
1958 }
1959
1960 /*
1961 * Given an open/lock owner and an error code, increment the
1962 * sequence ID if appropriate.
1963 */
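/*
 * (Per RFC 3530's seqid rules, an owner's sequence number advances on
 * every state-changing request except those that fail with one of the
 * errors listed below, for which the server does not update its saved
 * seqid.)
 */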
1964 void
1965 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1966 {
1967 switch (error) {
1968 case NFSERR_STALE_CLIENTID:
1969 case NFSERR_STALE_STATEID:
1970 case NFSERR_OLD_STATEID:
1971 case NFSERR_BAD_STATEID:
1972 case NFSERR_BAD_SEQID:
1973 case NFSERR_BADXDR:
1974 case NFSERR_RESOURCE:
1975 case NFSERR_NOFILEHANDLE:
1976 /* do not increment the open seqid on these errors */
1977 return;
1978 }
1979 if (noop)
1980 noop->noo_seqid++;
1981 if (nlop)
1982 nlop->nlo_seqid++;
1983 }
1984
1985 /*
1986 * Search a node's open file list for any conflicts with this request.
1987 * Also find this open owner's open file structure.
1988 * If not found and "alloc" is set, then allocate one.
1989 */
1990 int
1991 nfs_open_file_find(
1992 nfsnode_t np,
1993 struct nfs_open_owner *noop,
1994 struct nfs_open_file **nofpp,
1995 uint32_t accessMode,
1996 uint32_t denyMode,
1997 int alloc)
1998 {
1999 *nofpp = NULL;
2000 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2001 }
2002
2003 /*
2004 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2005 * if an existing one is not found. This is used in "create" scenarios to
2006 * officially add the provisional nofp to the node once the node is created.
2007 */
2008 int
2009 nfs_open_file_find_internal(
2010 nfsnode_t np,
2011 struct nfs_open_owner *noop,
2012 struct nfs_open_file **nofpp,
2013 uint32_t accessMode,
2014 uint32_t denyMode,
2015 int alloc)
2016 {
2017 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2018
2019 if (!np)
2020 goto alloc;
2021 tryagain:
2022 lck_mtx_lock(&np->n_openlock);
2023 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2024 if (nofp2->nof_owner == noop) {
2025 nofp = nofp2;
2026 if (!accessMode)
2027 break;
2028 }
2029 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2030 /* This request conflicts with an existing open on this client. */
2031 lck_mtx_unlock(&np->n_openlock);
2032 return (EACCES);
2033 }
2034 }
2035
2036 /*
2037 * If this open owner doesn't have an open
2038 * file structure yet, we create one for it.
2039 */
2040 if (!nofp && !*nofpp && !newnofp && alloc) {
2041 lck_mtx_unlock(&np->n_openlock);
2042 alloc:
2043 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2044 if (!newnofp)
2045 return (ENOMEM);
2046 bzero(newnofp, sizeof(*newnofp));
2047 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2048 newnofp->nof_owner = noop;
2049 nfs_open_owner_ref(noop);
2050 newnofp->nof_np = np;
2051 lck_mtx_lock(&noop->noo_lock);
2052 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2053 lck_mtx_unlock(&noop->noo_lock);
2054 if (np)
2055 goto tryagain;
2056 }
2057 if (!nofp) {
2058 if (*nofpp) {
2059 (*nofpp)->nof_np = np;
2060 nofp = *nofpp;
2061 } else {
2062 nofp = newnofp;
2063 }
2064 if (nofp && np)
2065 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2066 }
2067 if (np)
2068 lck_mtx_unlock(&np->n_openlock);
2069
2070 if (alloc && newnofp && (nofp != newnofp))
2071 nfs_open_file_destroy(newnofp);
2072
2073 *nofpp = nofp;
2074 return (nofp ? 0 : ESRCH);
2075 }
2076
2077 /*
2078 * Destroy an open file structure.
2079 */
2080 void
2081 nfs_open_file_destroy(struct nfs_open_file *nofp)
2082 {
2083 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2084 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2085 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2086 nfs_open_owner_rele(nofp->nof_owner);
2087 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2088 FREE(nofp, M_TEMP);
2089 }
2090
2091 /*
2092 * Mark an open file as busy because we are about to
2093 * start an operation that uses and updates open file state.
2094 */
2095 int
2096 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2097 {
2098 struct nfsmount *nmp;
2099 struct timespec ts = {2, 0};
2100 int error = 0, slpflag;
2101
2102 nmp = nofp->nof_owner->noo_mount;
2103 if (nfs_mount_gone(nmp))
2104 return (ENXIO);
2105 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2106
2107 lck_mtx_lock(&nofp->nof_lock);
2108 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2109 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2110 break;
2111 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2112 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2113 slpflag = 0;
2114 }
2115 if (!error)
2116 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2117 lck_mtx_unlock(&nofp->nof_lock);
2118
2119 return (error);
2120 }
2121
2122 /*
2123 * Clear the busy flag on an open file and wake up anyone waiting
2124 * to mark it busy.
2125 */
2126 void
2127 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2128 {
2129 int wanted;
2130
2131 lck_mtx_lock(&nofp->nof_lock);
2132 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
2133 panic("nfs_open_file_clear_busy");
2134 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2135 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
2136 lck_mtx_unlock(&nofp->nof_lock);
2137 if (wanted)
2138 wakeup(nofp);
2139 }
2140
2141 /*
2142 * Add the open state for the given access/deny modes to this open file.
2143 */
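/*
 * The nof_* counters below are named <access>[_<deny>]: access is
 * r, w, or rw; the deny suffix is dw (deny write) or drw (deny
 * read+write), with no suffix meaning deny none.  A "d_" prefix
 * counts opens granted locally under a delegation rather than via
 * an OPEN RPC to the server.
 */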
2144 void
2145 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2146 {
2147 lck_mtx_lock(&nofp->nof_lock);
2148 nofp->nof_access |= accessMode;
2149 nofp->nof_deny |= denyMode;
2150
2151 if (delegated) {
2152 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2153 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2154 nofp->nof_d_r++;
2155 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2156 nofp->nof_d_w++;
2157 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2158 nofp->nof_d_rw++;
2159 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2160 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2161 nofp->nof_d_r_dw++;
2162 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2163 nofp->nof_d_w_dw++;
2164 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2165 nofp->nof_d_rw_dw++;
2166 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2167 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2168 nofp->nof_d_r_drw++;
2169 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2170 nofp->nof_d_w_drw++;
2171 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2172 nofp->nof_d_rw_drw++;
2173 }
2174 } else {
2175 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2176 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2177 nofp->nof_r++;
2178 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2179 nofp->nof_w++;
2180 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2181 nofp->nof_rw++;
2182 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2183 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2184 nofp->nof_r_dw++;
2185 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2186 nofp->nof_w_dw++;
2187 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2188 nofp->nof_rw_dw++;
2189 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2190 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2191 nofp->nof_r_drw++;
2192 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2193 nofp->nof_w_drw++;
2194 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2195 nofp->nof_rw_drw++;
2196 }
2197 }
2198
2199 nofp->nof_opencnt++;
2200 lck_mtx_unlock(&nofp->nof_lock);
2201 }
2202
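/*
 * Example (a sketch): a delegated open for read with deny none calls
 * nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ,
 * NFS_OPEN_SHARE_DENY_NONE, 1), which bumps nof_d_r and nof_opencnt;
 * the matching close later removes that same access/deny combination.
 */
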
2203 /*
2204 * Find which particular open combo will be closed and report what
2205 * the new modes will be and whether the open was delegated.
2206 */
2207 void
2208 nfs_open_file_remove_open_find(
2209 struct nfs_open_file *nofp,
2210 uint32_t accessMode,
2211 uint32_t denyMode,
2212 uint32_t *newAccessMode,
2213 uint32_t *newDenyMode,
2214 int *delegated)
2215 {
2216 /*
2217 * Calculate new modes: a mode bit gets removed when the sum of all
2218 * the open counts that imply it is exactly one, i.e. this open is the last one using it.
2219 */
2220 *newAccessMode = nofp->nof_access;
2221 *newDenyMode = nofp->nof_deny;
2222
2223 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2224 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2225 ((nofp->nof_r + nofp->nof_d_r +
2226 nofp->nof_rw + nofp->nof_d_rw +
2227 nofp->nof_r_dw + nofp->nof_d_r_dw +
2228 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2229 nofp->nof_r_drw + nofp->nof_d_r_drw +
2230 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2231 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2232 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2233 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2234 ((nofp->nof_w + nofp->nof_d_w +
2235 nofp->nof_rw + nofp->nof_d_rw +
2236 nofp->nof_w_dw + nofp->nof_d_w_dw +
2237 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2238 nofp->nof_w_drw + nofp->nof_d_w_drw +
2239 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2240 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2241 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2242 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2243 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2244 nofp->nof_w_drw + nofp->nof_d_w_drw +
2245 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1))
2246 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2247 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2248 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2249 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2250 nofp->nof_w_drw + nofp->nof_d_w_drw +
2251 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2252 nofp->nof_r_dw + nofp->nof_d_r_dw +
2253 nofp->nof_w_dw + nofp->nof_d_w_dw +
2254 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1))
2255 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2256
2257 /* Find the corresponding open access/deny mode counter. */
2258 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2259 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2260 *delegated = (nofp->nof_d_r != 0);
2261 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2262 *delegated = (nofp->nof_d_w != 0);
2263 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2264 *delegated = (nofp->nof_d_rw != 0);
2265 else
2266 *delegated = 0;
2267 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2268 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2269 *delegated = (nofp->nof_d_r_dw != 0);
2270 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2271 *delegated = (nofp->nof_d_w_dw != 0);
2272 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2273 *delegated = (nofp->nof_d_rw_dw != 0);
2274 else
2275 *delegated = 0;
2276 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2277 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2278 *delegated = (nofp->nof_d_r_drw != 0);
2279 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2280 *delegated = (nofp->nof_d_w_drw != 0);
2281 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2282 *delegated = (nofp->nof_d_rw_drw != 0);
2283 else
2284 *delegated = 0;
2285 }
2286 }
2287
2288 /*
2289 * Remove the open state for the given access/deny modes from this open file.
2290 */
2291 void
2292 nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2293 {
2294 uint32_t newAccessMode, newDenyMode;
2295 int delegated = 0;
2296
2297 lck_mtx_lock(&nofp->nof_lock);
2298 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2299
2300 /* Decrement the corresponding open access/deny mode counter. */
2301 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2302 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2303 if (delegated) {
2304 if (nofp->nof_d_r == 0)
2305 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2306 else
2307 nofp->nof_d_r--;
2308 } else {
2309 if (nofp->nof_r == 0)
2310 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2311 else
2312 nofp->nof_r--;
2313 }
2314 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2315 if (delegated) {
2316 if (nofp->nof_d_w == 0)
2317 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2318 else
2319 nofp->nof_d_w--;
2320 } else {
2321 if (nofp->nof_w == 0)
2322 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2323 else
2324 nofp->nof_w--;
2325 }
2326 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2327 if (delegated) {
2328 if (nofp->nof_d_rw == 0)
2329 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2330 else
2331 nofp->nof_d_rw--;
2332 } else {
2333 if (nofp->nof_rw == 0)
2334 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2335 else
2336 nofp->nof_rw--;
2337 }
2338 }
2339 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2340 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2341 if (delegated) {
2342 if (nofp->nof_d_r_dw == 0)
2343 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2344 else
2345 nofp->nof_d_r_dw--;
2346 } else {
2347 if (nofp->nof_r_dw == 0)
2348 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2349 else
2350 nofp->nof_r_dw--;
2351 }
2352 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2353 if (delegated) {
2354 if (nofp->nof_d_w_dw == 0)
2355 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2356 else
2357 nofp->nof_d_w_dw--;
2358 } else {
2359 if (nofp->nof_w_dw == 0)
2360 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2361 else
2362 nofp->nof_w_dw--;
2363 }
2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2365 if (delegated) {
2366 if (nofp->nof_d_rw_dw == 0)
2367 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2368 else
2369 nofp->nof_d_rw_dw--;
2370 } else {
2371 if (nofp->nof_rw_dw == 0)
2372 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2373 else
2374 nofp->nof_rw_dw--;
2375 }
2376 }
2377 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2378 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2379 if (delegated) {
2380 if (nofp->nof_d_r_drw == 0)
2381 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2382 else
2383 nofp->nof_d_r_drw--;
2384 } else {
2385 if (nofp->nof_r_drw == 0)
2386 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2387 else
2388 nofp->nof_r_drw--;
2389 }
2390 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2391 if (delegated) {
2392 if (nofp->nof_d_w_drw == 0)
2393 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2394 else
2395 nofp->nof_d_w_drw--;
2396 } else {
2397 if (nofp->nof_w_drw == 0)
2398 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2399 else
2400 nofp->nof_w_drw--;
2401 }
2402 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2403 if (delegated) {
2404 if (nofp->nof_d_rw_drw == 0)
2405 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2406 else
2407 nofp->nof_d_rw_drw--;
2408 } else {
2409 if (nofp->nof_rw_drw == 0)
2410 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2411 else
2412 nofp->nof_rw_drw--;
2413 }
2414 }
2415 }
2416
2417 /* update the modes */
2418 nofp->nof_access = newAccessMode;
2419 nofp->nof_deny = newDenyMode;
2420 nofp->nof_opencnt--;
2421 lck_mtx_unlock(&nofp->nof_lock);
2422 }
2423
2424
2425 /*
2426 * Get the current (delegation, lock, open, default) stateid for this node.
2427 * If node has a delegation, use that stateid.
2428 * If pid has a lock, use the lockowner's stateid.
2429 * Or use the open file's stateid.
2430 * If no open file, use a default stateid of all ones.
2431 */
2432 void
2433 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2434 {
2435 struct nfsmount *nmp = NFSTONMP(np);
2436 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2437 struct nfs_open_owner *noop = NULL;
2438 struct nfs_open_file *nofp = NULL;
2439 struct nfs_lock_owner *nlop = NULL;
2440 nfs_stateid *s = NULL;
2441
2442 if (np->n_openflags & N_DELEG_MASK) {
2443 s = &np->n_dstateid;
2444 } else {
2445 if (p)
2446 nlop = nfs_lock_owner_find(np, p, 0);
2447 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2448 /* we hold locks, use lock stateid */
2449 s = &nlop->nlo_stateid;
2450 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2451 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2452 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2453 nofp->nof_access) {
2454 /* we (should) have the file open, use open stateid */
2455 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
2456 nfs4_reopen(nofp, thd);
2457 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2458 s = &nofp->nof_stateid;
2459 }
2460 }
2461
2462 if (s) {
2463 sid->seqid = s->seqid;
2464 sid->other[0] = s->other[0];
2465 sid->other[1] = s->other[1];
2466 sid->other[2] = s->other[2];
2467 } else {
2468 /* named attributes may not have a stateid for reads, so don't complain for them */
2469 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
2470 NP(np, "nfs_get_stateid: no stateid");
2471 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2472 }
2473 if (nlop)
2474 nfs_lock_owner_rele(nlop);
2475 if (noop)
2476 nfs_open_owner_rele(noop);
2477 }
2478
2479
2480 /*
2481 * When we have a delegation, we may be able to perform the OPEN locally.
2482 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2483 */
2484 int
2485 nfs4_open_delegated(
2486 nfsnode_t np,
2487 struct nfs_open_file *nofp,
2488 uint32_t accessMode,
2489 uint32_t denyMode,
2490 vfs_context_t ctx)
2491 {
2492 int error = 0, ismember, readtoo = 0, authorized = 0;
2493 uint32_t action;
2494 struct kauth_acl_eval eval;
2495 kauth_cred_t cred = vfs_context_ucred(ctx);
2496
2497 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2498 /*
2499 * Try to open it for read access too,
2500 * so the buffer cache can read data.
2501 */
2502 readtoo = 1;
2503 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2504 }
2505
2506 tryagain:
2507 action = 0;
2508 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ)
2509 action |= KAUTH_VNODE_READ_DATA;
2510 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE)
2511 action |= KAUTH_VNODE_WRITE_DATA;
2512
2513 /* evaluate ACE (if we have one) */
2514 if (np->n_dace.ace_flags) {
2515 eval.ae_requested = action;
2516 eval.ae_acl = &np->n_dace;
2517 eval.ae_count = 1;
2518 eval.ae_options = 0;
2519 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred))
2520 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2521 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2522 if (!error && ismember)
2523 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2524
2525 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2526 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2527 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2528 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2529
2530 error = kauth_acl_evaluate(cred, &eval);
2531
2532 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW))
2533 authorized = 1;
2534 }
2535
2536 if (!authorized) {
2537 /* need to ask the server via ACCESS */
2538 struct vnop_access_args naa;
2539 naa.a_desc = &vnop_access_desc;
2540 naa.a_vp = NFSTOV(np);
2541 naa.a_action = action;
2542 naa.a_context = ctx;
2543 if (!(error = nfs_vnop_access(&naa)))
2544 authorized = 1;
2545 }
2546
2547 if (!authorized) {
2548 if (readtoo) {
2549 /* try again without the extra read access */
2550 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2551 readtoo = 0;
2552 goto tryagain;
2553 }
2554 return (error ? error : EACCES);
2555 }
2556
2557 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2558
2559 return (0);
2560 }
2561
2562
2563 /*
2564 * Open a file with the given access/deny modes.
2565 *
2566 * If we have a delegation, we may be able to handle the open locally.
2567 * Otherwise, we will always send the open RPC even if this open's mode is
2568 * a subset of all the existing opens. This makes sure that we will always
2569 * be able to do a downgrade to any of the open modes.
2570 *
2571 * Note: local conflicts should have already been checked in nfs_open_file_find().
2572 */
2573 int
2574 nfs4_open(
2575 nfsnode_t np,
2576 struct nfs_open_file *nofp,
2577 uint32_t accessMode,
2578 uint32_t denyMode,
2579 vfs_context_t ctx)
2580 {
2581 vnode_t vp = NFSTOV(np);
2582 vnode_t dvp = NULL;
2583 struct componentname cn;
2584 const char *vname = NULL;
2585 size_t namelen;
2586 char smallname[128];
2587 char *filename = NULL;
2588 int error = 0, readtoo = 0;
2589
2590 /*
2591 * We can handle the OPEN ourselves if we have a delegation,
2592 * unless it's a read delegation and the open is asking for
2593 * either write access or deny read. We also don't bother to
2594 * use the delegation if it's being returned.
2595 */
2596 if (np->n_openflags & N_DELEG_MASK) {
2597 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
2598 return (error);
2599 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2600 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2601 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2602 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2603 nfs_open_state_clear_busy(np);
2604 return (error);
2605 }
2606 nfs_open_state_clear_busy(np);
2607 }
2608
2609 /*
2610 * [sigh] We can't trust VFS to get the parent right for named
2611 * attribute nodes. (It likes to reparent the nodes after we've
2612 * created them.) Luckily we can probably get the right parent
2613 * from the n_parent we have stashed away.
2614 */
2615 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2616 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
2617 dvp = NULL;
2618 if (!dvp)
2619 dvp = vnode_getparent(vp);
2620 vname = vnode_getname(vp);
2621 if (!dvp || !vname) {
2622 if (!error)
2623 error = EIO;
2624 goto out;
2625 }
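/* format the name into the small on-stack buffer, falling back to a heap allocation if it doesn't fit */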
2626 filename = &smallname[0];
2627 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2628 if (namelen >= sizeof(smallname)) {
2629 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
2630 if (!filename) {
2631 error = ENOMEM;
2632 goto out;
2633 }
2634 snprintf(filename, namelen+1, "%s", vname);
2635 }
2636 bzero(&cn, sizeof(cn));
2637 cn.cn_nameptr = filename;
2638 cn.cn_namelen = namelen;
2639
2640 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2641 /*
2642 * Try to open it for read access too,
2643 * so the buffer cache can read data.
2644 */
2645 readtoo = 1;
2646 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2647 }
2648 tryagain:
2649 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2650 if (error) {
2651 if (!nfs_mount_state_error_should_restart(error) &&
2652 (error != EINTR) && (error != ERESTART) && readtoo) {
2653 /* try again without the extra read access */
2654 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2655 readtoo = 0;
2656 goto tryagain;
2657 }
2658 goto out;
2659 }
2660 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2661 out:
2662 if (filename && (filename != &smallname[0]))
2663 FREE(filename, M_TEMP);
2664 if (vname)
2665 vnode_putname(vname);
2666 if (dvp != NULLVP)
2667 vnode_put(dvp);
2668 return (error);
2669 }
2670
2671 int
2672 nfs_vnop_mmap(
2673 struct vnop_mmap_args /* {
2674 struct vnodeop_desc *a_desc;
2675 vnode_t a_vp;
2676 int a_fflags;
2677 vfs_context_t a_context;
2678 } */ *ap)
2679 {
2680 vfs_context_t ctx = ap->a_context;
2681 vnode_t vp = ap->a_vp;
2682 nfsnode_t np = VTONFS(vp);
2683 int error = 0, accessMode, denyMode, delegated;
2684 struct nfsmount *nmp;
2685 struct nfs_open_owner *noop = NULL;
2686 struct nfs_open_file *nofp = NULL;
2687
2688 nmp = VTONMP(vp);
2689 if (nfs_mount_gone(nmp))
2690 return (ENXIO);
2691
2692 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
2693 return (EINVAL);
2694 if (np->n_flag & NREVOKE)
2695 return (EIO);
2696
2697 /*
2698 * fflags contains some combination of: PROT_READ, PROT_WRITE
2699 * Since it's not possible to mmap() without having the file open for reading,
2700 * read access is always there (regardless of whether PROT_READ is set).
2701 */
2702 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2703 if (ap->a_fflags & PROT_WRITE)
2704 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2705 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2706
2707 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2708 if (!noop)
2709 return (ENOMEM);
2710
2711 restart:
2712 error = nfs_mount_state_in_use_start(nmp, NULL);
2713 if (error) {
2714 nfs_open_owner_rele(noop);
2715 return (error);
2716 }
2717 if (np->n_flag & NREVOKE) {
2718 error = EIO;
2719 nfs_mount_state_in_use_end(nmp, 0);
2720 nfs_open_owner_rele(noop);
2721 return (error);
2722 }
2723
2724 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2725 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2726 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2727 error = EPERM;
2728 }
2729 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2730 nfs_mount_state_in_use_end(nmp, 0);
2731 error = nfs4_reopen(nofp, NULL);
2732 nofp = NULL;
2733 if (!error)
2734 goto restart;
2735 }
2736 if (!error)
2737 error = nfs_open_file_set_busy(nofp, NULL);
2738 if (error) {
2739 nofp = NULL;
2740 goto out;
2741 }
2742
2743 /*
2744 * The open reference for mmap must mirror an existing open because
2745 * we may need to reclaim it after the file is closed.
2746 * So grab another open count matching the accessMode passed in.
2747 * If we already had an mmap open, prefer read/write without deny mode.
2748 * This means we may have to drop the current mmap open first.
2749 *
2750 * N.B. We should have an open for the mmap, because, mmap was
2751 * called on an open descriptor, or we've created an open for read
2752 * from reading the first page for execve. However, if we piggy
2753 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2754 * that open may have closed.
2755 */
2756
2757 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2758 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2759 /* We shouldn't get here. We've already opened the file for execve */
2760 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2761 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2762 }
2763 /*
2764 * mmappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2765 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2766 */
2767 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2768 /* not asking for just read access -> fail */
2769 error = EPERM;
2770 goto out;
2771 }
2772 /* we don't have the file open, so open it for read access */
2773 if (nmp->nm_vers < NFS_VER4) {
2774 /* NFS v2/v3 opens are always allowed - so just add it. */
2775 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
2776 error = 0;
2777 } else {
2778 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
2779 }
2780 if (!error)
2781 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
2782 if (error)
2783 goto out;
2784 }
2785
2786 /* determine deny mode for open */
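/*
 * Walk the open counts from weakest to strongest deny mode (none,
 * deny-write, deny-both), delegated opens first, and pick the first
 * combination we actually hold so the mmap reference mirrors an
 * existing open.
 */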
2787 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2788 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2789 delegated = 1;
2790 if (nofp->nof_d_rw)
2791 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2792 else if (nofp->nof_d_rw_dw)
2793 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2794 else if (nofp->nof_d_rw_drw)
2795 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2796 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2797 delegated = 0;
2798 if (nofp->nof_rw)
2799 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2800 else if (nofp->nof_rw_dw)
2801 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2802 else if (nofp->nof_rw_drw)
2803 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2804 } else {
2805 error = EPERM;
2806 }
2807 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
2808 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
2809 delegated = 1;
2810 if (nofp->nof_d_r)
2811 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2812 else if (nofp->nof_d_r_dw)
2813 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2814 else if (nofp->nof_d_r_drw)
2815 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2816 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
2817 delegated = 0;
2818 if (nofp->nof_r)
2819 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2820 else if (nofp->nof_r_dw)
2821 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2822 else if (nofp->nof_r_drw)
2823 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2824 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
2825 /*
2826 * This clause and the one below are to co-opt a read/write access
2827 * for a read-only mapping. We probably got here because an existing
2828 * read/write open for an executable file already exists.
2829 */
2830 delegated = 1;
2831 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2832 if (nofp->nof_d_rw)
2833 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2834 else if (nofp->nof_d_rw_dw)
2835 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2836 else if (nofp->nof_d_rw_drw)
2837 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2838 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
2839 delegated = 0;
2840 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2841 if (nofp->nof_rw)
2842 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2843 else if (nofp->nof_rw_dw)
2844 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
2845 else if (nofp->nof_rw_drw)
2846 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
2847 } else {
2848 error = EPERM;
2849 }
2850 }
2851 if (error) /* mmap mode without proper open mode */
2852 goto out;
2853
2854 /*
2855 * If the existing mmap access is more than the new access OR the
2856 * existing access is the same and the existing deny mode is no greater,
2857 * then we'll stick with the existing mmap open mode.
2858 */
2859 if ((nofp->nof_mmap_access > accessMode) ||
2860 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
2861 goto out;
2862
2863 /* update mmap open mode */
2864 if (nofp->nof_mmap_access) {
2865 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2866 if (error) {
2867 if (!nfs_mount_state_error_should_restart(error))
2868 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2869 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2870 goto out;
2871 }
2872 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2873 }
2874
2875 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
2876 nofp->nof_mmap_access = accessMode;
2877 nofp->nof_mmap_deny = denyMode;
2878
2879 out:
2880 if (nofp)
2881 nfs_open_file_clear_busy(nofp);
2882 if (nfs_mount_state_in_use_end(nmp, error)) {
2883 nofp = NULL;
2884 goto restart;
2885 }
2886 if (noop)
2887 nfs_open_owner_rele(noop);
2888
2889 if (!error) {
2890 int ismapped = 0;
2891 nfs_node_lock_force(np);
2892 if ((np->n_flag & NISMAPPED) == 0) {
2893 np->n_flag |= NISMAPPED;
2894 ismapped = 1;
2895 }
2896 nfs_node_unlock(np);
2897 if (ismapped) {
2898 lck_mtx_lock(&nmp->nm_lock);
2899 nmp->nm_state &= ~NFSSTA_SQUISHY;
2900 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
2901 if (nmp->nm_curdeadtimeout <= 0)
2902 nmp->nm_deadto_start = 0;
2903 nmp->nm_mappers++;
2904 lck_mtx_unlock(&nmp->nm_lock);
2905 }
2906 }
2907
2908 return (error);
2909 }
2910
2911
2912 int
2913 nfs_vnop_mnomap(
2914 struct vnop_mnomap_args /* {
2915 struct vnodeop_desc *a_desc;
2916 vnode_t a_vp;
2917 vfs_context_t a_context;
2918 } */ *ap)
2919 {
2920 vfs_context_t ctx = ap->a_context;
2921 vnode_t vp = ap->a_vp;
2922 nfsnode_t np = VTONFS(vp);
2923 struct nfsmount *nmp;
2924 struct nfs_open_file *nofp = NULL;
2925 off_t size;
2926 int error;
2927 int is_mapped_flag = 0;
2928
2929 nmp = VTONMP(vp);
2930 if (nfs_mount_gone(nmp))
2931 return (ENXIO);
2932
2933 nfs_node_lock_force(np);
2934 if (np->n_flag & NISMAPPED) {
2935 is_mapped_flag = 1;
2936 np->n_flag &= ~NISMAPPED;
2937 }
2938 nfs_node_unlock(np);
2939 if (is_mapped_flag) {
2940 lck_mtx_lock(&nmp->nm_lock);
2941 if (nmp->nm_mappers)
2942 nmp->nm_mappers--;
2943 else
2944 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
2945 lck_mtx_unlock(&nmp->nm_lock);
2946 }
2947
2948 /* flush buffers/ubc before we drop the open (in case it's our last open) */
2949 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
2950 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp)))
2951 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
2952
2953 /* walk all open files and close all mmap opens */
2954 loop:
2955 error = nfs_mount_state_in_use_start(nmp, NULL);
2956 if (error)
2957 return (error);
2958 lck_mtx_lock(&np->n_openlock);
2959 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
2960 if (!nofp->nof_mmap_access)
2961 continue;
2962 lck_mtx_unlock(&np->n_openlock);
2963 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2964 nfs_mount_state_in_use_end(nmp, 0);
2965 error = nfs4_reopen(nofp, NULL);
2966 if (!error)
2967 goto loop;
2968 }
2969 if (!error)
2970 error = nfs_open_file_set_busy(nofp, NULL);
2971 if (error) {
2972 lck_mtx_lock(&np->n_openlock);
2973 break;
2974 }
2975 if (nofp->nof_mmap_access) {
2976 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
2977 if (!nfs_mount_state_error_should_restart(error)) {
2978 if (error) /* not a state-operation-restarting error, so just clear the access */
2979 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2980 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2981 }
2982 if (error)
2983 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
2984 }
2985 nfs_open_file_clear_busy(nofp);
2986 nfs_mount_state_in_use_end(nmp, error);
2987 goto loop;
2988 }
2989 lck_mtx_unlock(&np->n_openlock);
2990 nfs_mount_state_in_use_end(nmp, error);
2991 return (error);
2992 }
2993
2994 /*
2995 * Search a node's lock owner list for the owner for this process.
2996 * If not found and "alloc" is set, then allocate a new one.
2997 */
2998 struct nfs_lock_owner *
2999 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3000 {
3001 pid_t pid = proc_pid(p);
3002 struct nfs_lock_owner *nlop, *newnlop = NULL;
3003
3004 tryagain:
3005 lck_mtx_lock(&np->n_openlock);
3006 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3007 if (nlop->nlo_pid != pid)
3008 continue;
3009 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
3010 break;
3011 /* same pid but different start time: the pid was recycled, so this lock owner is stale... reuse it if we can */
3012 if (nlop->nlo_refcnt) {
3013 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3014 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3015 lck_mtx_unlock(&np->n_openlock);
3016 goto tryagain;
3017 }
3018 nlop->nlo_pid_start = p->p_start;
3019 nlop->nlo_seqid = 0;
3020 nlop->nlo_stategenid = 0;
3021 break;
3022 }
3023
3024 if (!nlop && !newnlop && alloc) {
3025 lck_mtx_unlock(&np->n_openlock);
3026 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3027 if (!newnlop)
3028 return (NULL);
3029 bzero(newnlop, sizeof(*newnlop));
3030 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3031 newnlop->nlo_pid = pid;
3032 newnlop->nlo_pid_start = p->p_start;
3033 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3034 TAILQ_INIT(&newnlop->nlo_locks);
3035 goto tryagain;
3036 }
3037 if (!nlop && newnlop) {
3038 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3039 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3040 nlop = newnlop;
3041 }
3042 lck_mtx_unlock(&np->n_openlock);
3043
3044 if (newnlop && (nlop != newnlop))
3045 nfs_lock_owner_destroy(newnlop);
3046
3047 if (nlop)
3048 nfs_lock_owner_ref(nlop);
3049
3050 return (nlop);
3051 }
3052
3053 /*
3054 * destroy a lock owner that's no longer needed
3055 */
3056 void
3057 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3058 {
3059 if (nlop->nlo_open_owner) {
3060 nfs_open_owner_rele(nlop->nlo_open_owner);
3061 nlop->nlo_open_owner = NULL;
3062 }
3063 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3064 FREE(nlop, M_TEMP);
3065 }
3066
3067 /*
3068 * acquire a reference count on a lock owner
3069 */
3070 void
3071 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3072 {
3073 lck_mtx_lock(&nlop->nlo_lock);
3074 nlop->nlo_refcnt++;
3075 lck_mtx_unlock(&nlop->nlo_lock);
3076 }
3077
3078 /*
3079 * drop a reference count on a lock owner and destroy it if
3080 * it is no longer referenced and no longer on the mount's list.
3081 */
3082 void
3083 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3084 {
3085 lck_mtx_lock(&nlop->nlo_lock);
3086 if (nlop->nlo_refcnt < 1)
3087 panic("nfs_lock_owner_rele: no refcnt");
3088 nlop->nlo_refcnt--;
3089 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3090 panic("nfs_lock_owner_rele: busy");
3091 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3092 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3093 lck_mtx_unlock(&nlop->nlo_lock);
3094 return;
3095 }
3096 /* owner is no longer referenced or linked to mount, so destroy it */
3097 lck_mtx_unlock(&nlop->nlo_lock);
3098 nfs_lock_owner_destroy(nlop);
3099 }
3100
3101 /*
3102 * Mark a lock owner as busy because we are about to
3103 * start an operation that uses and updates lock owner state.
3104 */
3105 int
3106 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3107 {
3108 struct nfsmount *nmp;
3109 struct timespec ts = {2, 0};
3110 int error = 0, slpflag;
3111
3112 nmp = nlop->nlo_open_owner->noo_mount;
3113 if (nfs_mount_gone(nmp))
3114 return (ENXIO);
3115 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3116
3117 lck_mtx_lock(&nlop->nlo_lock);
3118 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3119 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
3120 break;
3121 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3122 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3123 slpflag = 0;
3124 }
3125 if (!error)
3126 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3127 lck_mtx_unlock(&nlop->nlo_lock);
3128
3129 return (error);
3130 }
3131
3132 /*
3133 * Clear the busy flag on a lock owner and wake up anyone waiting
3134 * to mark it busy.
3135 */
3136 void
3137 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3138 {
3139 int wanted;
3140
3141 lck_mtx_lock(&nlop->nlo_lock);
3142 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
3143 panic("nfs_lock_owner_clear_busy");
3144 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3145 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
3146 lck_mtx_unlock(&nlop->nlo_lock);
3147 if (wanted)
3148 wakeup(nlop);
3149 }
3150
3151 /*
3152 * Insert a held lock into a lock owner's sorted list.
3153 * (flock locks are always inserted at the head of the list)
3154 */
3155 void
3156 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3157 {
3158 struct nfs_file_lock *nflp;
3159
3160 /* insert new lock in lock owner's held lock list */
3161 lck_mtx_lock(&nlop->nlo_lock);
3162 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3163 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3164 } else {
3165 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3166 if (newnflp->nfl_start < nflp->nfl_start)
3167 break;
3168 }
3169 if (nflp)
3170 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3171 else
3172 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3173 }
3174 lck_mtx_unlock(&nlop->nlo_lock);
3175 }
3176
3177 /*
3178 * Get a file lock structure for this lock owner.
3179 */
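/*
 * Each lock owner embeds one preallocated lock structure (nlo_alock);
 * hand that out when it's free, otherwise fall back to MALLOC and mark
 * the result NFS_FILE_LOCK_ALLOC so nfs_file_lock_destroy() knows to
 * FREE it.
 */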
3180 struct nfs_file_lock *
3181 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3182 {
3183 struct nfs_file_lock *nflp = NULL;
3184
3185 lck_mtx_lock(&nlop->nlo_lock);
3186 if (!nlop->nlo_alock.nfl_owner) {
3187 nflp = &nlop->nlo_alock;
3188 nflp->nfl_owner = nlop;
3189 }
3190 lck_mtx_unlock(&nlop->nlo_lock);
3191 if (!nflp) {
3192 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3193 if (!nflp)
3194 return (NULL);
3195 bzero(nflp, sizeof(*nflp));
3196 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3197 nflp->nfl_owner = nlop;
3198 }
3199 nfs_lock_owner_ref(nlop);
3200 return (nflp);
3201 }
3202
3203 /*
3204 * destroy the given NFS file lock structure
3205 */
3206 void
3207 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3208 {
3209 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3210
3211 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3212 nflp->nfl_owner = NULL;
3213 FREE(nflp, M_TEMP);
3214 } else {
3215 lck_mtx_lock(&nlop->nlo_lock);
3216 bzero(nflp, sizeof(*nflp));
3217 lck_mtx_unlock(&nlop->nlo_lock);
3218 }
3219 nfs_lock_owner_rele(nlop);
3220 }
3221
3222 /*
3223 * Check if one file lock conflicts with another.
3224 * (nflp1 is the new lock. nflp2 is the existing lock.)
3225 */
3226 int
3227 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3228 {
3229 /* no conflict if lock is dead */
3230 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
3231 return (0);
3232 /* no conflict if it's ours - unless the lock style doesn't match */
3233 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3234 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3235 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3236 (nflp1->nfl_start > nflp2->nfl_start) &&
3237 (nflp1->nfl_end < nflp2->nfl_end))
3238 *willsplit = 1;
3239 return (0);
3240 }
3241 /* no conflict if ranges don't overlap */
3242 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
3243 return (0);
3244 /* no conflict if neither lock is exclusive */
3245 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
3246 return (0);
3247 /* conflict */
3248 return (1);
3249 }
3250
3251 /*
3252 * Send an NFSv4 LOCK RPC to the server.
3253 */
3254 int
3255 nfs4_setlock_rpc(
3256 nfsnode_t np,
3257 struct nfs_open_file *nofp,
3258 struct nfs_file_lock *nflp,
3259 int reclaim,
3260 int flags,
3261 thread_t thd,
3262 kauth_cred_t cred)
3263 {
3264 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3265 struct nfsmount *nmp;
3266 struct nfsm_chain nmreq, nmrep;
3267 uint64_t xid;
3268 uint32_t locktype;
3269 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3270 struct nfsreq_secinfo_args si;
3271
3272 nmp = NFSTONMP(np);
3273 if (nfs_mount_gone(nmp))
3274 return (ENXIO);
3275 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3276 return (EINVAL);
3277
3278 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
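/*
 * A lock owner the server hasn't seen since the last state recovery
 * (stategenid mismatch) must be established first: per RFC 3530, the
 * LOCK request then carries the open stateid and open owner seqid
 * (the new_lock_owner arm below) instead of an existing lock stateid.
 */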
3279 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3280 ((nflp->nfl_type == F_WRLCK) ?
3281 NFS_LOCK_TYPE_WRITEW :
3282 NFS_LOCK_TYPE_READW) :
3283 ((nflp->nfl_type == F_WRLCK) ?
3284 NFS_LOCK_TYPE_WRITE :
3285 NFS_LOCK_TYPE_READ);
3286 if (newlocker) {
3287 error = nfs_open_file_set_busy(nofp, thd);
3288 if (error)
3289 return (error);
3290 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3291 if (error) {
3292 nfs_open_file_clear_busy(nofp);
3293 return (error);
3294 }
3295 if (!nlop->nlo_open_owner) {
3296 nfs_open_owner_ref(nofp->nof_owner);
3297 nlop->nlo_open_owner = nofp->nof_owner;
3298 }
3299 }
3300 error = nfs_lock_owner_set_busy(nlop, thd);
3301 if (error) {
3302 if (newlocker) {
3303 nfs_open_owner_clear_busy(nofp->nof_owner);
3304 nfs_open_file_clear_busy(nofp);
3305 }
3306 return (error);
3307 }
3308
3309 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3310 nfsm_chain_null(&nmreq);
3311 nfsm_chain_null(&nmrep);
3312
3313 // PUTFH, GETATTR, LOCK
3314 numops = 3;
3315 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3316 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3317 numops--;
3318 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3319 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3320 numops--;
3321 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3322 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3323 numops--;
3324 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3325 nfsm_chain_add_32(error, &nmreq, locktype);
3326 nfsm_chain_add_32(error, &nmreq, reclaim);
3327 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3328 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3329 nfsm_chain_add_32(error, &nmreq, newlocker);
3330 if (newlocker) {
3331 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3332 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3333 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3334 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3335 } else {
3336 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3337 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3338 }
3339 nfsm_chain_build_done(error, &nmreq);
3340 nfsm_assert(error, (numops == 0), EPROTO);
3341 nfsmout_if(error);
3342
3343 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3344
3345 if ((lockerror = nfs_node_lock(np)))
3346 error = lockerror;
3347 nfsm_chain_skip_tag(error, &nmrep);
3348 nfsm_chain_get_32(error, &nmrep, numops);
3349 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3350 nfsmout_if(error);
3351 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3352 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3353 nfsmout_if(error);
3354 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3355 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3356 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3357
3358 /* Update the lock owner's stategenid once it appears the server has state for it. */
3359 /* We determine this by noting the request was successful (we got a stateid). */
3360 if (newlocker && !error)
3361 nlop->nlo_stategenid = nmp->nm_stategenid;
3362 nfsmout:
3363 if (!lockerror)
3364 nfs_node_unlock(np);
3365 nfs_lock_owner_clear_busy(nlop);
3366 if (newlocker) {
3367 nfs_open_owner_clear_busy(nofp->nof_owner);
3368 nfs_open_file_clear_busy(nofp);
3369 }
3370 nfsm_chain_cleanup(&nmreq);
3371 nfsm_chain_cleanup(&nmrep);
3372 return (error);
3373 }
3374
3375 /*
3376 * Send an NFSv4 LOCKU RPC to the server.
3377 */
3378 int
3379 nfs4_unlock_rpc(
3380 nfsnode_t np,
3381 struct nfs_lock_owner *nlop,
3382 int type,
3383 uint64_t start,
3384 uint64_t end,
3385 int flags,
3386 thread_t thd,
3387 kauth_cred_t cred)
3388 {
3389 struct nfsmount *nmp;
3390 struct nfsm_chain nmreq, nmrep;
3391 uint64_t xid;
3392 int error = 0, lockerror = ENOENT, numops, status;
3393 struct nfsreq_secinfo_args si;
3394
3395 nmp = NFSTONMP(np);
3396 if (nfs_mount_gone(nmp))
3397 return (ENXIO);
3398 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3399 return (EINVAL);
3400
3401 error = nfs_lock_owner_set_busy(nlop, NULL);
3402 if (error)
3403 return (error);
3404
3405 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3406 nfsm_chain_null(&nmreq);
3407 nfsm_chain_null(&nmrep);
3408
3409 // PUTFH, GETATTR, LOCKU
3410 numops = 3;
3411 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3412 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3413 numops--;
3414 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3415 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3416 numops--;
3417 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3418 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3419 numops--;
3420 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3421 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3422 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3423 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3424 nfsm_chain_add_64(error, &nmreq, start);
3425 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3426 nfsm_chain_build_done(error, &nmreq);
3427 nfsm_assert(error, (numops == 0), EPROTO);
3428 nfsmout_if(error);
3429
3430 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
3431
3432 if ((lockerror = nfs_node_lock(np)))
3433 error = lockerror;
3434 nfsm_chain_skip_tag(error, &nmrep);
3435 nfsm_chain_get_32(error, &nmrep, numops);
3436 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3437 nfsmout_if(error);
3438 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3439 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3440 nfsmout_if(error);
3441 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3442 nfs_owner_seqid_increment(NULL, nlop, error);
3443 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3444 nfsmout:
3445 if (!lockerror)
3446 nfs_node_unlock(np);
3447 nfs_lock_owner_clear_busy(nlop);
3448 nfsm_chain_cleanup(&nmreq);
3449 nfsm_chain_cleanup(&nmrep);
3450 return (error);
3451 }
3452
3453 /*
3454 * Send an NFSv4 LOCKT RPC to the server.
3455 */
3456 int
3457 nfs4_getlock_rpc(
3458 nfsnode_t np,
3459 struct nfs_lock_owner *nlop,
3460 struct flock *fl,
3461 uint64_t start,
3462 uint64_t end,
3463 vfs_context_t ctx)
3464 {
3465 struct nfsmount *nmp;
3466 struct nfsm_chain nmreq, nmrep;
3467 uint64_t xid, val64 = 0;
3468 uint32_t val = 0;
3469 int error = 0, lockerror, numops, status;
3470 struct nfsreq_secinfo_args si;
3471
3472 nmp = NFSTONMP(np);
3473 if (nfs_mount_gone(nmp))
3474 return (ENXIO);
3475 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
3476 return (EINVAL);
3477
3478 lockerror = ENOENT;
3479 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3480 nfsm_chain_null(&nmreq);
3481 nfsm_chain_null(&nmrep);
3482
3483 // PUTFH, GETATTR, LOCKT
3484 numops = 3;
3485 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3486 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3487 numops--;
3488 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3489 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3490 numops--;
3491 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3492 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3493 numops--;
3494 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3495 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3496 nfsm_chain_add_64(error, &nmreq, start);
3497 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3498 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3499 nfsm_chain_build_done(error, &nmreq);
3500 nfsm_assert(error, (numops == 0), EPROTO);
3501 nfsmout_if(error);
3502
3503 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3504
3505 if ((lockerror = nfs_node_lock(np)))
3506 error = lockerror;
3507 nfsm_chain_skip_tag(error, &nmrep);
3508 nfsm_chain_get_32(error, &nmrep, numops);
3509 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3510 nfsmout_if(error);
3511 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3512 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3513 nfsmout_if(error);
3514 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
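/*
 * LOCKT reports a conflict by returning NFSERR_DENIED along with a
 * description of the conflicting lock, which we translate into the
 * caller's flock structure.  Success means no conflict (F_UNLCK).
 */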
3515 if (error == NFSERR_DENIED) {
3516 error = 0;
3517 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3518 nfsm_chain_get_64(error, &nmrep, val64);
3519 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3520 nfsm_chain_get_32(error, &nmrep, val);
3521 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3522 fl->l_pid = 0;
3523 fl->l_whence = SEEK_SET;
3524 } else if (!error) {
3525 fl->l_type = F_UNLCK;
3526 }
3527 nfsmout:
3528 if (!lockerror)
3529 nfs_node_unlock(np);
3530 nfsm_chain_cleanup(&nmreq);
3531 nfsm_chain_cleanup(&nmrep);
3532 return (error);
3533 }
3534
3535
3536 /*
3537 * Check for any conflicts with the given lock.
3538 *
3539 * Checking for a lock doesn't require the file to be opened.
3540 * So we skip all the open owner, open file, lock owner work
3541 * and just check for a conflicting lock.
3542 */
3543 int
3544 nfs_advlock_getlock(
3545 nfsnode_t np,
3546 struct nfs_lock_owner *nlop,
3547 struct flock *fl,
3548 uint64_t start,
3549 uint64_t end,
3550 vfs_context_t ctx)
3551 {
3552 struct nfsmount *nmp;
3553 struct nfs_file_lock *nflp;
3554 int error = 0, answered = 0;
3555
3556 nmp = NFSTONMP(np);
3557 if (nfs_mount_gone(nmp))
3558 return (ENXIO);
3559
3560 restart:
3561 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx))))
3562 return (error);
3563
3564 lck_mtx_lock(&np->n_openlock);
3565 /* scan currently held locks for conflict */
3566 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3567 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3568 continue;
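/* a conflict is any overlapping range where at least one of the locks is a write lock */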
3569 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3570 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
3571 break;
3572 }
3573 if (nflp) {
3574 /* found a conflicting lock */
3575 fl->l_type = nflp->nfl_type;
3576 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3577 fl->l_start = nflp->nfl_start;
3578 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3579 fl->l_whence = SEEK_SET;
3580 answered = 1;
3581 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3582 /*
3583 * If we have a write delegation, we know there can't be other
3584 * locks on the server. So the answer is no conflicting lock found.
3585 */
3586 fl->l_type = F_UNLCK;
3587 answered = 1;
3588 }
3589 lck_mtx_unlock(&np->n_openlock);
3590 if (answered) {
3591 nfs_mount_state_in_use_end(nmp, 0);
3592 return (0);
3593 }
3594
3595 /* no conflict found locally, so ask the server */
3596 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3597
3598 if (nfs_mount_state_in_use_end(nmp, error))
3599 goto restart;
3600 return (error);
3601 }
3602
3603 /*
3604 * Acquire a file lock for the given range.
3605 *
3606 * Add the lock (request) to the lock queue.
3607 * Scan the lock queue for any conflicting locks.
3608 * If a conflict is found, block or return an error.
3609 * Once the end of the queue is reached, send the request to the server.
3610 * If the server grants the lock, scan the lock queue and
3611 * update any existing locks. Then (optionally) scan the
3612 * queue again to coalesce any locks adjacent to the new one.
3613 */
3614 int
3615 nfs_advlock_setlock(
3616 nfsnode_t np,
3617 struct nfs_open_file *nofp,
3618 struct nfs_lock_owner *nlop,
3619 int op,
3620 uint64_t start,
3621 uint64_t end,
3622 int style,
3623 short type,
3624 vfs_context_t ctx)
3625 {
3626 struct nfsmount *nmp;
3627 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3628 struct nfs_file_lock *coalnflp;
3629 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3630 struct timespec ts = {1, 0};
3631
3632 nmp = NFSTONMP(np);
3633 if (nfs_mount_gone(nmp))
3634 return (ENXIO);
3635 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3636
3637 if ((type != F_RDLCK) && (type != F_WRLCK))
3638 return (EINVAL);
3639
3640 /* allocate a new lock */
3641 newnflp = nfs_file_lock_alloc(nlop);
3642 if (!newnflp)
3643 return (ENOLCK);
3644 newnflp->nfl_start = start;
3645 newnflp->nfl_end = end;
3646 newnflp->nfl_type = type;
3647 if (op == F_SETLKW)
3648 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3649 newnflp->nfl_flags |= style;
3650 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
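/* Queue the request as BLOCKED; the flag is cleared once the server's reply (grant or failure) is processed. */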
3651
3652 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3653 /*
3654 * For exclusive flock-style locks, if we block waiting for the
3655 * lock, we need to first release any currently held shared
3656 * flock-style lock. So, the first thing we do is check if we
3657 * have a shared flock-style lock.
3658 */
3659 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3660 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
3661 nflp = NULL;
3662 if (nflp && (nflp->nfl_type != F_RDLCK))
3663 nflp = NULL;
3664 flocknflp = nflp;
3665 }
3666
3667 restart:
3668 restart = 0;
3669 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3670 if (error)
3671 goto error_out;
3672 inuse = 1;
3673 if (np->n_flag & NREVOKE) {
3674 error = EIO;
3675 nfs_mount_state_in_use_end(nmp, 0);
3676 inuse = 0;
3677 goto error_out;
3678 }
3679 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3680 nfs_mount_state_in_use_end(nmp, 0);
3681 inuse = 0;
3682 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3683 if (error)
3684 goto error_out;
3685 goto restart;
3686 }
3687
3688 lck_mtx_lock(&np->n_openlock);
3689 if (!inqueue) {
3690 /* insert new lock at beginning of list */
3691 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3692 inqueue = 1;
3693 }
3694
3695 /* scan current list of locks (held and pending) for conflicts */
3696 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3697 nextnflp = TAILQ_NEXT(nflp, nfl_link);
3698 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
3699 continue;
3700 /* Conflict */
3701 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3702 error = EAGAIN;
3703 break;
3704 }
3705 /* Block until this lock is no longer held. */
3706 if (nflp->nfl_blockcnt == UINT_MAX) {
3707 error = ENOLCK;
3708 break;
3709 }
3710 nflp->nfl_blockcnt++;
3711 do {
3712 if (flocknflp) {
3713 /* release any currently held shared lock before sleeping */
3714 lck_mtx_unlock(&np->n_openlock);
3715 nfs_mount_state_in_use_end(nmp, 0);
3716 inuse = 0;
3717 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3718 flocknflp = NULL;
3719 if (!error)
3720 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3721 if (error) {
3722 lck_mtx_lock(&np->n_openlock);
3723 break;
3724 }
3725 inuse = 1;
3726 lck_mtx_lock(&np->n_openlock);
3727 /* no need to block/sleep if the conflict is gone */
3728 if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
3729 break;
3730 }
3731 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
3732 slpflag = 0;
3733 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3734 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3735 /* looks like we have a recovery pending... restart */
3736 restart = 1;
3737 lck_mtx_unlock(&np->n_openlock);
3738 nfs_mount_state_in_use_end(nmp, 0);
3739 inuse = 0;
3740 lck_mtx_lock(&np->n_openlock);
3741 break;
3742 }
3743 if (!error && (np->n_flag & NREVOKE))
3744 error = EIO;
3745 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
3746 nflp->nfl_blockcnt--;
3747 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
3748 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3749 nfs_file_lock_destroy(nflp);
3750 }
3751 if (error || restart)
3752 break;
3753 /* We have released n_openlock and we can't trust that nextnflp is still valid. */
3754 /* So, restart this lock-scanning loop from the beginning. */
3755 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
3756 }
3757 lck_mtx_unlock(&np->n_openlock);
3758 if (restart)
3759 goto restart;
3760 if (error)
3761 goto error_out;
3762
3763 if (willsplit) {
3764 /*
3765 * It looks like this operation is splitting a lock.
3766 * We allocate a new lock now so we don't have to worry
3767 * about the allocation failing after we've updated some state.
3768 */
3769 nflp2 = nfs_file_lock_alloc(nlop);
3770 if (!nflp2) {
3771 error = ENOLCK;
3772 goto error_out;
3773 }
3774 }
3775
3776 /* once the scan for local conflicts is clear, send the request to the server */
3777 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx))))
3778 goto error_out;
3779 busy = 1;
3780 delay = 0;
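/*
 * Try to acquire the lock: with a write delegation we can simply take
 * it locally; otherwise send the LOCK RPC, retrying after a short
 * delay while the server is in its grace period or while a blocking
 * request keeps getting denied.
 */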
3781 do {
3782 /* do we have a delegation? (that we're not returning?) */
3783 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
3784 if (np->n_openflags & N_DELEG_WRITE) {
3785 /* with a write delegation, just take the lock delegated */
3786 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
3787 error = 0;
3788 /* make sure the lock owner knows its open owner */
3789 if (!nlop->nlo_open_owner) {
3790 nfs_open_owner_ref(nofp->nof_owner);
3791 nlop->nlo_open_owner = nofp->nof_owner;
3792 }
3793 break;
3794 } else {
3795 /*
3796 * If we don't have any non-delegated opens but we do have
3797 * delegated opens, then we need to first claim the delegated
3798 * opens so that the lock request on the server can be associated
3799 * with an open it knows about.
3800 */
3801 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
3802 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
3803 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
3804 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
3805 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
3806 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
3807 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
3808 if (error)
3809 break;
3810 }
3811 }
3812 }
3813 if (np->n_flag & NREVOKE)
3814 error = EIO;
3815 if (!error)
3816 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
3817 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
3818 break;
3819 /* request was denied due to either conflict or grace period */
3820 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
3821 error = EAGAIN;
3822 break;
3823 }
3824 if (flocknflp) {
3825 /* release any currently held shared lock before sleeping */
3826 nfs_open_state_clear_busy(np);
3827 busy = 0;
3828 nfs_mount_state_in_use_end(nmp, 0);
3829 inuse = 0;
3830 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
3831 flocknflp = NULL;
3832 if (!error2)
3833 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3834 if (!error2) {
3835 inuse = 1;
3836 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
3837 }
3838 if (error2) {
3839 error = error2;
3840 break;
3841 }
3842 busy = 1;
3843 }
3844 /*
3845 * Wait a little bit and send the request again.
3846 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
3847 */
3848 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
3849 if (error == NFSERR_GRACE)
3850 delay = 4;
3851 if (delay < 4)
3852 delay++;
3853 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2));
3854 slpflag = 0;
3855 }
3856 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
3857 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
3858 /* looks like we have a recovery pending... restart */
3859 nfs_open_state_clear_busy(np);
3860 busy = 0;
3861 nfs_mount_state_in_use_end(nmp, 0);
3862 inuse = 0;
3863 goto restart;
3864 }
3865 if (!error && (np->n_flag & NREVOKE))
3866 error = EIO;
3867 } while (!error);
3868
3869 error_out:
3870 if (nfs_mount_state_error_should_restart(error)) {
3871 /* looks like we need to restart this operation */
3872 if (busy) {
3873 nfs_open_state_clear_busy(np);
3874 busy = 0;
3875 }
3876 if (inuse) {
3877 nfs_mount_state_in_use_end(nmp, error);
3878 inuse = 0;
3879 }
3880 goto restart;
3881 }
3882 lck_mtx_lock(&np->n_openlock);
3883 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
3884 if (error) {
3885 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3886 if (newnflp->nfl_blockcnt) {
3887 /* wake up anyone blocked on this lock */
3888 wakeup(newnflp);
3889 } else {
3890 /* remove newnflp from lock list and destroy */
3891 if (inqueue)
3892 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
3893 nfs_file_lock_destroy(newnflp);
3894 }
3895 lck_mtx_unlock(&np->n_openlock);
3896 if (busy)
3897 nfs_open_state_clear_busy(np);
3898 if (inuse)
3899 nfs_mount_state_in_use_end(nmp, error);
3900 if (nflp2)
3901 nfs_file_lock_destroy(nflp2);
3902 return (error);
3903 }
3904
3905 /* server granted the lock */
3906
3907 /*
3908 * Scan for locks to update.
3909 *
3910 * Locks completely covered are killed.
3911 * At most two locks may need to be clipped.
3912 * It's possible that a single lock may need to be split.
3913 */
3914 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3915 if (nflp == newnflp)
3916 continue;
3917 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3918 continue;
3919 if (nflp->nfl_owner != nlop)
3920 continue;
3921 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
3922 continue;
3923 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
3924 continue;
3925 /* here's one to update */
3926 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
3927 /* The entire lock is being replaced. */
3928 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3929 lck_mtx_lock(&nlop->nlo_lock);
3930 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3931 lck_mtx_unlock(&nlop->nlo_lock);
3932 /* lock will be destroyed below, if no waiters */
3933 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
3934 /* We're replacing a range in the middle of a lock. */
3935 /* The current lock will be split into two locks. */
3936 /* Update locks and insert new lock after current lock. */
3937 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
3938 nflp2->nfl_type = nflp->nfl_type;
3939 nflp2->nfl_start = newnflp->nfl_end + 1;
3940 nflp2->nfl_end = nflp->nfl_end;
3941 nflp->nfl_end = newnflp->nfl_start - 1;
3942 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
3943 nfs_lock_owner_insert_held_lock(nlop, nflp2);
3944 nextnflp = nflp2;
3945 nflp2 = NULL;
3946 } else if (newnflp->nfl_start > nflp->nfl_start) {
3947 /* We're replacing the end of a lock. */
3948 nflp->nfl_end = newnflp->nfl_start - 1;
3949 } else if (newnflp->nfl_end < nflp->nfl_end) {
3950 /* We're replacing the start of a lock. */
3951 nflp->nfl_start = newnflp->nfl_end + 1;
3952 }
3953 if (nflp->nfl_blockcnt) {
3954 /* wake up anyone blocked on this lock */
3955 wakeup(nflp);
3956 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
3957 /* remove nflp from lock list and destroy */
3958 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
3959 nfs_file_lock_destroy(nflp);
3960 }
3961 }
3962
3963 nfs_lock_owner_insert_held_lock(nlop, newnflp);
3964
3965 /*
3966 * POSIX locks should be coalesced when possible.
3967 */
3968 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
3969 /*
3970 * Walk through the lock queue and check each of our held locks with
3971 * the previous and next locks in the lock owner's "held lock list".
3972 * If the two locks can be coalesced, we merge the current lock into
3973 * the other (previous or next) lock. Merging this way makes sure that
3974 * lock ranges are always merged forward in the lock queue. This is
3975 * important because anyone blocked on the lock being "merged away"
3976 * will still need to block on that range and it will simply continue
3977 * checking locks that are further down the list.
3978 */
3979 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
3980 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
3981 continue;
3982 if (nflp->nfl_owner != nlop)
3983 continue;
3984 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
3985 continue;
3986 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
3987 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3988 (coalnflp->nfl_type == nflp->nfl_type) &&
3989 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
3990 coalnflp->nfl_end = nflp->nfl_end;
3991 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
3992 lck_mtx_lock(&nlop->nlo_lock);
3993 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
3994 lck_mtx_unlock(&nlop->nlo_lock);
3995 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
3996 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
3997 (coalnflp->nfl_type == nflp->nfl_type) &&
3998 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
3999 coalnflp->nfl_start = nflp->nfl_start;
4000 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4001 lck_mtx_lock(&nlop->nlo_lock);
4002 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4003 lck_mtx_unlock(&nlop->nlo_lock);
4004 }
4005 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
4006 continue;
4007 if (nflp->nfl_blockcnt) {
4008 /* wake up anyone blocked on this lock */
4009 wakeup(nflp);
4010 } else {
4011 /* remove nflp from lock list and destroy */
4012 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4013 nfs_file_lock_destroy(nflp);
4014 }
4015 }
4016 }
4017
4018 lck_mtx_unlock(&np->n_openlock);
4019 nfs_open_state_clear_busy(np);
4020 nfs_mount_state_in_use_end(nmp, error);
4021
4022 if (nflp2)
4023 nfs_file_lock_destroy(nflp2);
4024 return (error);
4025 }
4026
4027 /*
4028 * Release all (same style) locks within the given range.
4029 */
4030 int
4031 nfs_advlock_unlock(
4032 nfsnode_t np,
4033 struct nfs_open_file *nofp,
4034 struct nfs_lock_owner *nlop,
4035 uint64_t start,
4036 uint64_t end,
4037 int style,
4038 vfs_context_t ctx)
4039 {
4040 struct nfsmount *nmp;
4041 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4042 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4043
4044 nmp = NFSTONMP(np);
4045 if (nfs_mount_gone(nmp))
4046 return (ENXIO);
4047
4048 restart:
4049 if ((error = nfs_mount_state_in_use_start(nmp, NULL)))
4050 return (error);
4051 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4052 nfs_mount_state_in_use_end(nmp, 0);
4053 error = nfs4_reopen(nofp, NULL);
4054 if (error)
4055 return (error);
4056 goto restart;
4057 }
4058 if ((error = nfs_open_state_set_busy(np, NULL))) {
4059 nfs_mount_state_in_use_end(nmp, error);
4060 return (error);
4061 }
4062
4063 lck_mtx_lock(&np->n_openlock);
4064 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4065 /*
4066 * We may need to allocate a new lock if an existing lock gets split.
4067 * So, we first scan the list to check for a split, and if there's
4068 * going to be one, we'll allocate one now.
4069 */
4070 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4071 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4072 continue;
4073 if (nflp->nfl_owner != nlop)
4074 continue;
4075 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4076 continue;
4077 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4078 continue;
4079 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4080 willsplit = 1;
4081 break;
4082 }
4083 }
4084 if (willsplit) {
4085 lck_mtx_unlock(&np->n_openlock);
4086 nfs_open_state_clear_busy(np);
4087 nfs_mount_state_in_use_end(nmp, 0);
4088 newnflp = nfs_file_lock_alloc(nlop);
4089 if (!newnflp)
4090 return (ENOMEM);
4091 goto restart;
4092 }
4093 }
4094
4095 /*
4096 * Free all of our locks in the given range.
4097 *
4098 * Note that this process requires sending requests to the server.
4099 * Because of this, we will release the n_openlock while performing
4100 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4101 * locks from changing underneath us. However, other entries in the
4102 * list may be removed. So we need to be careful walking the list.
4103 */
4104
4105 /*
4106 * Don't unlock ranges that are held by other-style locks.
4107 * If the style is POSIX, don't send any unlock RPCs while a flock-style lock is held.
4108 * If we unlock a flock-style lock, don't send unlock RPCs for any POSIX-style
4109 * ranges held; instead, send unlocks for the ranges not held.
4110 */
4111 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4112 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4113 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
4114 send_unlock_rpcs = 0;
4115 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4116 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4117 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4118 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4119 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4120 uint64_t s = 0;
4121 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4122 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
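/*
 * Walk the owner's held POSIX locks in order; "s" tracks the start of
 * the next span not covered by a held lock, and we send an unlock RPC
 * for each such gap, plus the tail after the last held lock.
 */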
4123 while (!delegated && nflp) {
4124 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4125 /* unlock the range preceding this lock */
4126 lck_mtx_unlock(&np->n_openlock);
4127 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0,
4128 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4129 if (nfs_mount_state_error_should_restart(error)) {
4130 nfs_open_state_clear_busy(np);
4131 nfs_mount_state_in_use_end(nmp, error);
4132 goto restart;
4133 }
4134 lck_mtx_lock(&np->n_openlock);
4135 if (error)
4136 goto out;
4137 s = nflp->nfl_end+1;
4138 }
4139 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4140 }
4141 if (!delegated) {
4142 lck_mtx_unlock(&np->n_openlock);
4143 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4144 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4145 if (nfs_mount_state_error_should_restart(error)) {
4146 nfs_open_state_clear_busy(np);
4147 nfs_mount_state_in_use_end(nmp, error);
4148 goto restart;
4149 }
4150 lck_mtx_lock(&np->n_openlock);
4151 if (error)
4152 goto out;
4153 }
4154 send_unlock_rpcs = 0;
4155 }
4156
4157 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4158 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
4159 continue;
4160 if (nflp->nfl_owner != nlop)
4161 continue;
4162 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
4163 continue;
4164 if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
4165 continue;
4166 /* here's one to unlock */
4167 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4168 /* The entire lock is being unlocked. */
4169 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4170 lck_mtx_unlock(&np->n_openlock);
4171 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4172 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4173 if (nfs_mount_state_error_should_restart(error)) {
4174 nfs_open_state_clear_busy(np);
4175 nfs_mount_state_in_use_end(nmp, error);
4176 goto restart;
4177 }
4178 lck_mtx_lock(&np->n_openlock);
4179 }
4180 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4181 if (error)
4182 break;
4183 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4184 lck_mtx_lock(&nlop->nlo_lock);
4185 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4186 lck_mtx_unlock(&nlop->nlo_lock);
4187 /* lock will be destroyed below, if no waiters */
4188 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4189 /* We're unlocking a range in the middle of a lock. */
4190 /* The current lock will be split into two locks. */
4191 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4192 lck_mtx_unlock(&np->n_openlock);
4193 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4194 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4195 if (nfs_mount_state_error_should_restart(error)) {
4196 nfs_open_state_clear_busy(np);
4197 nfs_mount_state_in_use_end(nmp, error);
4198 goto restart;
4199 }
4200 lck_mtx_lock(&np->n_openlock);
4201 }
4202 if (error)
4203 break;
4204 /* update locks and insert new lock after current lock */
4205 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED));
4206 newnflp->nfl_type = nflp->nfl_type;
4207 newnflp->nfl_start = end + 1;
4208 newnflp->nfl_end = nflp->nfl_end;
4209 nflp->nfl_end = start - 1;
4210 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4211 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4212 nextnflp = newnflp;
4213 newnflp = NULL;
4214 } else if (start > nflp->nfl_start) {
4215 /* We're unlocking the end of a lock. */
4216 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4217 lck_mtx_unlock(&np->n_openlock);
4218 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4219 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4220 if (nfs_mount_state_error_should_restart(error)) {
4221 nfs_open_state_clear_busy(np);
4222 nfs_mount_state_in_use_end(nmp, error);
4223 goto restart;
4224 }
4225 lck_mtx_lock(&np->n_openlock);
4226 }
4227 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4228 if (error)
4229 break;
4230 nflp->nfl_end = start - 1;
4231 } else if (end < nflp->nfl_end) {
4232 /* We're unlocking the start of a lock. */
4233 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4234 lck_mtx_unlock(&np->n_openlock);
4235 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4236 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4237 if (nfs_mount_state_error_should_restart(error)) {
4238 nfs_open_state_clear_busy(np);
4239 nfs_mount_state_in_use_end(nmp, error);
4240 goto restart;
4241 }
4242 lck_mtx_lock(&np->n_openlock);
4243 }
4244 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4245 if (error)
4246 break;
4247 nflp->nfl_start = end + 1;
4248 }
4249 if (nflp->nfl_blockcnt) {
4250 /* wake up anyone blocked on this lock */
4251 wakeup(nflp);
4252 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4253 /* remove nflp from lock list and destroy */
4254 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4255 nfs_file_lock_destroy(nflp);
4256 }
4257 }
4258 out:
4259 lck_mtx_unlock(&np->n_openlock);
4260 nfs_open_state_clear_busy(np);
4261 nfs_mount_state_in_use_end(nmp, 0);
4262
4263 if (newnflp)
4264 nfs_file_lock_destroy(newnflp);
4265 return (error);
4266 }
4267
4268 /*
4269 * NFSv4 advisory file locking
4270 */
4271 int
4272 nfs_vnop_advlock(
4273 struct vnop_advlock_args /* {
4274 struct vnodeop_desc *a_desc;
4275 vnode_t a_vp;
4276 caddr_t a_id;
4277 int a_op;
4278 struct flock *a_fl;
4279 int a_flags;
4280 vfs_context_t a_context;
4281 } */ *ap)
4282 {
4283 vnode_t vp = ap->a_vp;
4284 nfsnode_t np = VTONFS(ap->a_vp);
4285 struct flock *fl = ap->a_fl;
4286 int op = ap->a_op;
4287 int flags = ap->a_flags;
4288 vfs_context_t ctx = ap->a_context;
4289 struct nfsmount *nmp;
4290 struct nfs_open_owner *noop = NULL;
4291 struct nfs_open_file *nofp = NULL;
4292 struct nfs_lock_owner *nlop = NULL;
4293 off_t lstart;
4294 uint64_t start, end;
4295 int error = 0, modified, style;
4296 enum vtype vtype;
4297 #define OFF_MAX QUAD_MAX
4298
4299 nmp = VTONMP(ap->a_vp);
4300 if (nfs_mount_gone(nmp))
4301 return (ENXIO);
4302 lck_mtx_lock(&nmp->nm_lock);
4303 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4304 lck_mtx_unlock(&nmp->nm_lock);
4305 return (ENOTSUP);
4306 }
4307 lck_mtx_unlock(&nmp->nm_lock);
4308
4309 if (np->n_flag & NREVOKE)
4310 return (EIO);
4311 vtype = vnode_vtype(ap->a_vp);
4312 if (vtype == VDIR) /* ignore lock requests on directories */
4313 return (0);
4314 if (vtype != VREG) /* anything other than regular files is invalid */
4315 return (EINVAL);
4316
4317 /* Convert the flock structure into a start and end. */
4318 switch (fl->l_whence) {
4319 case SEEK_SET:
4320 case SEEK_CUR:
4321 /*
4322 * Caller is responsible for adding any necessary offset
4323 * to fl->l_start when SEEK_CUR is used.
4324 */
4325 lstart = fl->l_start;
4326 break;
4327 case SEEK_END:
4328 /* We need to flush any dirty data and refetch attributes to */
4329 /* make sure we have the correct end-of-file offset. */
4330 if ((error = nfs_node_lock(np)))
4331 return (error);
4332 modified = (np->n_flag & NMODIFIED);
4333 nfs_node_unlock(np);
4334 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
4335 return (error);
4336 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED)))
4337 return (error);
4338 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4339 if ((np->n_size > OFF_MAX) ||
4340 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
4341 error = EOVERFLOW;
4342 lstart = np->n_size + fl->l_start;
4343 nfs_data_unlock(np);
4344 if (error)
4345 return (error);
4346 break;
4347 default:
4348 return (EINVAL);
4349 }
4350 if (lstart < 0)
4351 return (EINVAL);
4352 start = lstart;
4353 if (fl->l_len == 0) {
4354 end = UINT64_MAX;
4355 } else if (fl->l_len > 0) {
4356 if ((fl->l_len - 1) > (OFF_MAX - lstart))
4357 return (EOVERFLOW);
4358 end = start - 1 + fl->l_len;
4359 } else { /* l_len is negative */
4360 if ((lstart + fl->l_len) < 0)
4361 return (EINVAL);
4362 end = start - 1;
4363 start += fl->l_len;
4364 }
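/*
 * Examples: l_start=100, l_len=10  locks bytes [100, 109];
 *           l_start=100, l_len=0   locks [100, UINT64_MAX];
 *           l_start=100, l_len=-10 locks [90, 99].
 */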
4365 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX))))
4366 return (EINVAL);
4367
4368 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4369 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
4370 return (EINVAL);
4371
4372 /* find the lock owner, allocating one unless this is an unlock */
4373 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4374 if (!nlop) {
4375 error = (op == F_UNLCK) ? 0 : ENOMEM;
4376 if (error)
4377 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4378 goto out;
4379 }
4380
4381 if (op == F_GETLK) {
4382 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4383 } else {
4384 /* find the open owner */
4385 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4386 if (!noop) {
4387 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4388 error = EPERM;
4389 goto out;
4390 }
4391 /* find the open file */
4392 restart:
4393 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4394 if (error)
4395 error = EBADF;
4396 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4397 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4398 error = EIO;
4399 }
4400 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4401 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4402 nofp = NULL;
4403 if (!error)
4404 goto restart;
4405 }
4406 if (error) {
4407 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4408 goto out;
4409 }
4410 if (op == F_UNLCK) {
4411 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4412 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4413 if ((op == F_SETLK) && (flags & F_WAIT))
4414 op = F_SETLKW;
4415 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4416 } else {
4417 /* not getlk, unlock or lock? */
4418 error = EINVAL;
4419 }
4420 }
4421
4422 out:
4423 if (nlop)
4424 nfs_lock_owner_rele(nlop);
4425 if (noop)
4426 nfs_open_owner_rele(noop);
4427 return (error);
4428 }
4429
4430 /*
4431 * Check if an open owner holds any locks on a file.
4432 */
4433 int
4434 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4435 {
4436 struct nfs_lock_owner *nlop;
4437
4438 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4439 if (nlop->nlo_open_owner != noop)
4440 continue;
4441 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4442 break;
4443 }
4444 return (nlop ? 1 : 0);
4445 }
4446
4447 /*
4448 * Reopen simple (no deny, no locks) open state that was lost.
4449 */
4450 int
4451 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4452 {
4453 struct nfs_open_owner *noop = nofp->nof_owner;
4454 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4455 nfsnode_t np = nofp->nof_np;
4456 vnode_t vp = NFSTOV(np);
4457 vnode_t dvp = NULL;
4458 struct componentname cn;
4459 const char *vname = NULL;
4460 const char *name = NULL;
4461 size_t namelen;
4462 char smallname[128];
4463 char *filename = NULL;
4464 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4465 struct timespec ts = { 1, 0 };
4466
4467 lck_mtx_lock(&nofp->nof_lock);
4468 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4469 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
4470 break;
4471 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
4472 slpflag = 0;
4473 }
4474 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4475 lck_mtx_unlock(&nofp->nof_lock);
4476 return (error);
4477 }
4478 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4479 lck_mtx_unlock(&nofp->nof_lock);
4480
4481 nfs_node_lock_force(np);
4482 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4483 /*
4484 * The node's been sillyrenamed, so we need to use
4485 * the sillyrename directory/name to do the open.
4486 */
4487 struct nfs_sillyrename *nsp = np->n_sillyrename;
4488 dvp = NFSTOV(nsp->nsr_dnp);
4489 if ((error = vnode_get(dvp))) {
4490 nfs_node_unlock(np);
4491 goto out;
4492 }
4493 name = nsp->nsr_name;
4494 } else {
4495 /*
4496 * [sigh] We can't trust VFS to get the parent right for named
4497 * attribute nodes. (It likes to reparent the nodes after we've
4498 * created them.) Luckily we can probably get the right parent
4499 * from the n_parent we have stashed away.
4500 */
4501 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4502 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
4503 dvp = NULL;
4504 if (!dvp)
4505 dvp = vnode_getparent(vp);
4506 vname = vnode_getname(vp);
4507 if (!dvp || !vname) {
4508 if (!error)
4509 error = EIO;
4510 nfs_node_unlock(np);
4511 goto out;
4512 }
4513 name = vname;
4514 }
4515 filename = &smallname[0];
4516 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4517 if (namelen >= sizeof(smallname)) {
4518 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
4519 if (!filename) {
4520 error = ENOMEM;
4521 goto out;
4522 }
4523 snprintf(filename, namelen+1, "%s", name);
4524 }
4525 nfs_node_unlock(np);
4526 bzero(&cn, sizeof(cn));
4527 cn.cn_nameptr = filename;
4528 cn.cn_namelen = namelen;
4529
4530 restart:
4531 done = 0;
4532 if ((error = nfs_mount_state_in_use_start(nmp, thd)))
4533 goto out;
4534
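/*
 * Reissue an OPEN for each share-access mode this open file holds
 * (read/write, write-only, read-only), all with deny-none, to
 * reestablish the lost open state.
 */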
4535 if (nofp->nof_rw)
4536 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4537 if (!error && nofp->nof_w)
4538 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4539 if (!error && nofp->nof_r)
4540 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4541
4542 if (nfs_mount_state_in_use_end(nmp, error)) {
4543 if (error == NFSERR_GRACE)
4544 goto restart;
4545 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4546 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4547 error = 0;
4548 goto out;
4549 }
4550 done = 1;
4551 out:
4552 if (error && (error != EINTR) && (error != ERESTART))
4553 nfs_revoke_open_state_for_node(np);
4554 lck_mtx_lock(&nofp->nof_lock);
4555 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4556 if (done)
4557 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4558 else if (error)
4559 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4560 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4561 lck_mtx_unlock(&nofp->nof_lock);
4562 if (filename && (filename != &smallname[0]))
4563 FREE(filename, M_TEMP);
4564 if (vname)
4565 vnode_putname(vname);
4566 if (dvp != NULLVP)
4567 vnode_put(dvp);
4568 return (error);
4569 }
4570
4571 /*
4572 * Send a normal OPEN RPC to open/create a file.
4573 */
4574 int
4575 nfs4_open_rpc(
4576 struct nfs_open_file *nofp,
4577 vfs_context_t ctx,
4578 struct componentname *cnp,
4579 struct vnode_attr *vap,
4580 vnode_t dvp,
4581 vnode_t *vpp,
4582 int create,
4583 int share_access,
4584 int share_deny)
4585 {
4586 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4587 cnp, vap, dvp, vpp, create, share_access, share_deny));
4588 }
4589
4590 /*
4591 * Send an OPEN RPC to reopen a file.
4592 */
4593 int
4594 nfs4_open_reopen_rpc(
4595 struct nfs_open_file *nofp,
4596 thread_t thd,
4597 kauth_cred_t cred,
4598 struct componentname *cnp,
4599 vnode_t dvp,
4600 vnode_t *vpp,
4601 int share_access,
4602 int share_deny)
4603 {
4604 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny));
4605 }
4606
4607 /*
4608 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4609 */
4610 int
4611 nfs4_open_confirm_rpc(
4612 struct nfsmount *nmp,
4613 nfsnode_t dnp,
4614 u_char *fhp,
4615 int fhlen,
4616 struct nfs_open_owner *noop,
4617 nfs_stateid *sid,
4618 thread_t thd,
4619 kauth_cred_t cred,
4620 struct nfs_vattr *nvap,
4621 uint64_t *xidp)
4622 {
4623 struct nfsm_chain nmreq, nmrep;
4624 int error = 0, status, numops;
4625 struct nfsreq_secinfo_args si;
4626
4627 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
4628 nfsm_chain_null(&nmreq);
4629 nfsm_chain_null(&nmrep);
4630
4631 // PUTFH, OPEN_CONFIRM, GETATTR
4632 numops = 3;
4633 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
4634 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
4635 numops--;
4636 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4637 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
4638 numops--;
4639 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
4640 nfsm_chain_add_stateid(error, &nmreq, sid);
4641 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4642 numops--;
4643 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4644 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4645 nfsm_chain_build_done(error, &nmreq);
4646 nfsm_assert(error, (numops == 0), EPROTO);
4647 nfsmout_if(error);
4648 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
4649
4650 nfsm_chain_skip_tag(error, &nmrep);
4651 nfsm_chain_get_32(error, &nmrep, numops);
4652 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4653 nfsmout_if(error);
4654 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
4655 nfs_owner_seqid_increment(noop, NULL, error);
4656 nfsm_chain_get_stateid(error, &nmrep, sid);
4657 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4658 nfsmout_if(error);
4659 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
4660 nfsmout:
4661 nfsm_chain_cleanup(&nmreq);
4662 nfsm_chain_cleanup(&nmrep);
4663 return (error);
4664 }
4665
4666 /*
4667 * common OPEN RPC code
4668 *
4669 * If create is set, ctx must be passed in.
4670 * Returns a node on success if no node was passed in.
4671 */
4672 int
4673 nfs4_open_rpc_internal(
4674 struct nfs_open_file *nofp,
4675 vfs_context_t ctx,
4676 thread_t thd,
4677 kauth_cred_t cred,
4678 struct componentname *cnp,
4679 struct vnode_attr *vap,
4680 vnode_t dvp,
4681 vnode_t *vpp,
4682 int create,
4683 int share_access,
4684 int share_deny)
4685 {
4686 struct nfsmount *nmp;
4687 struct nfs_open_owner *noop = nofp->nof_owner;
4688 struct nfs_vattr nvattr;
4689 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
4690 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
4691 u_int64_t xid, savedxid = 0;
4692 nfsnode_t dnp = VTONFS(dvp);
4693 nfsnode_t np, newnp = NULL;
4694 vnode_t newvp = NULL;
4695 struct nfsm_chain nmreq, nmrep;
4696 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
4697 uint32_t rflags, delegation, recall;
4698 struct nfs_stateid stateid, dstateid, *sid;
4699 fhandle_t fh;
4700 struct nfsreq rq, *req = &rq;
4701 struct nfs_dulookup dul;
4702 char sbuf[64], *s;
4703 uint32_t ace_type, ace_flags, ace_mask, len, slen;
4704 struct kauth_ace ace;
4705 struct nfsreq_secinfo_args si;
4706
4707 if (create && !ctx)
4708 return (EINVAL);
4709
4710 nmp = VTONMP(dvp);
4711 if (nfs_mount_gone(nmp))
4712 return (ENXIO);
4713 nfsvers = nmp->nm_vers;
4714 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
4715 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
4716 return (EINVAL);
4717
4718 np = *vpp ? VTONFS(*vpp) : NULL;
4719 if (create && vap) {
4720 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
4721 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
4722 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
4723 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
4724 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)))
4725 vap->va_vaflags |= VA_UTIMES_NULL;
4726 } else {
4727 exclusive = gotuid = gotgid = 0;
4728 }
4729 if (nofp) {
4730 sid = &nofp->nof_stateid;
4731 } else {
4732 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
4733 sid = &stateid;
4734 }
4735
4736 if ((error = nfs_open_owner_set_busy(noop, thd)))
4737 return (error);
4738 again:
4739 rflags = delegation = recall = 0;
4740 ace.ace_flags = 0;
4741 s = sbuf;
4742 slen = sizeof(sbuf);
4743 NVATTR_INIT(&nvattr);
4744 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
4745
4746 nfsm_chain_null(&nmreq);
4747 nfsm_chain_null(&nmrep);
4748
4749 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
4750 numops = 6;
4751 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
4752 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
4753 numops--;
4754 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
4755 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
4756 numops--;
4757 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
4758 numops--;
4759 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
4760 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
4761 nfsm_chain_add_32(error, &nmreq, share_access);
4762 nfsm_chain_add_32(error, &nmreq, share_deny);
4763 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
4764 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
4765 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
4766 nfsm_chain_add_32(error, &nmreq, create);
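/*
 * For creates, the openflag4 also carries the create method: an
 * exclusive create sends an 8-byte verifier, while an unchecked
 * create sends the initial attributes.
 */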
4767 if (create) {
4768 if (exclusive) {
4769 static uint32_t create_verf; // XXX need a better verifier
4770 create_verf++;
4771 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
4772 /* insert 64 bit verifier */
4773 nfsm_chain_add_32(error, &nmreq, create_verf);
4774 nfsm_chain_add_32(error, &nmreq, create_verf);
4775 } else {
4776 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
4777 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
4778 }
4779 }
4780 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
4781 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
4782 numops--;
4783 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4784 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
4785 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
4786 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
4787 numops--;
4788 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
4789 numops--;
4790 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
4791 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
4792 nfsm_chain_build_done(error, &nmreq);
4793 nfsm_assert(error, (numops == 0), EPROTO);
4794 if (!error)
4795 error = busyerror = nfs_node_set_busy(dnp, thd);
4796 nfsmout_if(error);
4797
4798 if (create && !namedattrs)
4799 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
4800
4801 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
4802 if (!error) {
4803 if (create && !namedattrs)
4804 nfs_dulookup_start(&dul, dnp, ctx);
4805 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
4806 savedxid = xid;
4807 }
4808
4809 if (create && !namedattrs)
4810 nfs_dulookup_finish(&dul, dnp, ctx);
4811
4812 if ((lockerror = nfs_node_lock(dnp)))
4813 error = lockerror;
4814 nfsm_chain_skip_tag(error, &nmrep);
4815 nfsm_chain_get_32(error, &nmrep, numops);
4816 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
4817 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
4818 nfsmout_if(error);
4819 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
4820 nfs_owner_seqid_increment(noop, NULL, error);
4821 nfsm_chain_get_stateid(error, &nmrep, sid);
4822 nfsm_chain_check_change_info(error, &nmrep, dnp);
4823 nfsm_chain_get_32(error, &nmrep, rflags);
4824 bmlen = NFS_ATTR_BITMAP_LEN;
4825 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
4826 nfsm_chain_get_32(error, &nmrep, delegation);
4827 if (!error)
4828 switch (delegation) {
4829 case NFS_OPEN_DELEGATE_NONE:
4830 break;
4831 case NFS_OPEN_DELEGATE_READ:
4832 case NFS_OPEN_DELEGATE_WRITE:
4833 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
4834 nfsm_chain_get_32(error, &nmrep, recall);
4835 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit XXX
4836 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
4837 /* if we have any trouble accepting the ACE, just invalidate it */
4838 ace_type = ace_flags = ace_mask = len = 0;
4839 nfsm_chain_get_32(error, &nmrep, ace_type);
4840 nfsm_chain_get_32(error, &nmrep, ace_flags);
4841 nfsm_chain_get_32(error, &nmrep, ace_mask);
4842 nfsm_chain_get_32(error, &nmrep, len);
4843 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
4844 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
4845 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
4846 if (!error && (len >= slen)) {
4847 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
4848 if (s)
4849 slen = len+1;
4850 else
4851 ace.ace_flags = 0;
4852 }
4853 if (s)
4854 nfsm_chain_get_opaque(error, &nmrep, len, s);
4855 else
4856 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
4857 if (!error && s) {
4858 s[len] = '\0';
4859 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
4860 ace.ace_flags = 0;
4861 }
4862 if (error || !s)
4863 ace.ace_flags = 0;
4864 if (s && (s != sbuf))
4865 FREE(s, M_TEMP);
4866 break;
4867 default:
4868 error = EBADRPC;
4869 break;
4870 }
4871 /* At this point if we have no error, the object was created/opened. */
4872 open_error = error;
4873 nfsmout_if(error);
4874 if (create && vap && !exclusive)
4875 nfs_vattr_set_supported(bitmap, vap);
4876 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4877 nfsmout_if(error);
4878 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
4879 nfsmout_if(error);
4880 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
4881 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
4882 error = EBADRPC;
4883 goto nfsmout;
4884 }
4885 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4886 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
4887 // Solaris Named Attributes may do this due to a bug... so don't warn for named attributes.
4888 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
4889 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
4890 }
4891 /* directory attributes: if we don't get them, make sure to invalidate */
4892 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
4893 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
4894 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
4895 if (error)
4896 NATTRINVALIDATE(dnp);
4897 nfsmout_if(error);
4898
4899 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
4900 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
4901
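/*
 * If the server set NFS_OPEN_RESULT_CONFIRM, we must confirm the open
 * with an OPEN_CONFIRM RPC before the stateid can be used.
 */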
4902 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
4903 nfs_node_unlock(dnp);
4904 lockerror = ENOENT;
4905 NVATTR_CLEANUP(&nvattr);
4906 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
4907 nfsmout_if(error);
4908 savedxid = xid;
4909 if ((lockerror = nfs_node_lock(dnp)))
4910 error = lockerror;
4911 }
4912
4913 nfsmout:
4914 nfsm_chain_cleanup(&nmreq);
4915 nfsm_chain_cleanup(&nmrep);
4916
4917 if (!lockerror && create) {
4918 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
4919 dnp->n_flag &= ~NNEGNCENTRIES;
4920 cache_purge_negatives(dvp);
4921 }
4922 dnp->n_flag |= NMODIFIED;
4923 nfs_node_unlock(dnp);
4924 lockerror = ENOENT;
4925 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
4926 }
4927 if (!lockerror)
4928 nfs_node_unlock(dnp);
4929 if (!error && !np && fh.fh_len) {
4930 /* create the vnode with the filehandle and attributes */
4931 xid = savedxid;
4932 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
4933 if (!error)
4934 newvp = NFSTOV(newnp);
4935 }
4936 NVATTR_CLEANUP(&nvattr);
4937 if (!busyerror)
4938 nfs_node_clear_busy(dnp);
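/*
 * If we were granted a delegation, either record it on the node (and
 * add the node to the mount's delegation list) or, if it's already
 * being recalled or something went wrong, return the delegation.
 */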
4939 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
4940 if (!np)
4941 np = newnp;
4942 if (!error && np && !recall) {
4943 /* stuff the delegation state in the node */
4944 lck_mtx_lock(&np->n_openlock);
4945 np->n_openflags &= ~N_DELEG_MASK;
4946 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4947 np->n_dstateid = dstateid;
4948 np->n_dace = ace;
4949 if (np->n_dlink.tqe_next == NFSNOLIST) {
4950 lck_mtx_lock(&nmp->nm_lock);
4951 if (np->n_dlink.tqe_next == NFSNOLIST)
4952 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4953 lck_mtx_unlock(&nmp->nm_lock);
4954 }
4955 lck_mtx_unlock(&np->n_openlock);
4956 } else {
4957 /* give the delegation back */
4958 if (np) {
4959 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
4960 /* update delegation state and return it */
4961 lck_mtx_lock(&np->n_openlock);
4962 np->n_openflags &= ~N_DELEG_MASK;
4963 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
4964 np->n_dstateid = dstateid;
4965 np->n_dace = ace;
4966 if (np->n_dlink.tqe_next == NFSNOLIST) {
4967 lck_mtx_lock(&nmp->nm_lock);
4968 if (np->n_dlink.tqe_next == NFSNOLIST)
4969 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
4970 lck_mtx_unlock(&nmp->nm_lock);
4971 }
4972 lck_mtx_unlock(&np->n_openlock);
4973 /* don't need to send a separate delegreturn for fh */
4974 fh.fh_len = 0;
4975 }
4976 /* return np's current delegation */
4977 nfs4_delegation_return(np, 0, thd, cred);
4978 }
4979 if (fh.fh_len) /* return fh's delegation if it wasn't for np */
4980 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
4981 }
4982 }
4983 if (error) {
4984 if (exclusive && (error == NFSERR_NOTSUPP)) {
4985 exclusive = 0;
4986 goto again;
4987 }
4988 if (newvp) {
4989 nfs_node_unlock(newnp);
4990 vnode_put(newvp);
4991 }
4992 } else if (create) {
4993 nfs_node_unlock(newnp);
4994 if (exclusive) {
4995 error = nfs4_setattr_rpc(newnp, vap, ctx);
4996 if (error && (gotuid || gotgid)) {
4997 /* It's possible the server didn't like our attempt to set IDs, */
4998 /* so let's try it again without those. */
4999 VATTR_CLEAR_ACTIVE(vap, va_uid);
5000 VATTR_CLEAR_ACTIVE(vap, va_gid);
5001 error = nfs4_setattr_rpc(newnp, vap, ctx);
5002 }
5003 }
5004 if (error)
5005 vnode_put(newvp);
5006 else
5007 *vpp = newvp;
5008 }
5009 nfs_open_owner_clear_busy(noop);
5010 return (error);
5011 }
5012
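/*
 * A note on the delegation-list bookkeeping above: the TAILQ insert is
 * double-checked so that n_openlock never has to be dropped before
 * taking the mount's nm_lock.  A minimal sketch of the pattern, using
 * the same fields and locks as the code above:
 *
 *	if (np->n_dlink.tqe_next == NFSNOLIST) {	// cheap hint, read under n_openlock
 *		lck_mtx_lock(&nmp->nm_lock);
 *		if (np->n_dlink.tqe_next == NFSNOLIST)	// authoritative check under nm_lock
 *			TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
 *		lck_mtx_unlock(&nmp->nm_lock);
 *	}
 *
 * Only the second check is decisive: nm_lock serializes membership in
 * nm_delegations, and NFSNOLIST marks an entry that isn't on the list.
 */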
5013
5014 /*
5015 * Send an OPEN RPC to claim a delegated open for a file
5016 */
5017 int
5018 nfs4_claim_delegated_open_rpc(
5019 struct nfs_open_file *nofp,
5020 int share_access,
5021 int share_deny,
5022 int flags)
5023 {
5024 struct nfsmount *nmp;
5025 struct nfs_open_owner *noop = nofp->nof_owner;
5026 struct nfs_vattr nvattr;
5027 int error = 0, lockerror = ENOENT, status;
5028 int nfsvers, numops;
5029 u_int64_t xid;
5030 nfsnode_t np = nofp->nof_np;
5031 struct nfsm_chain nmreq, nmrep;
5032 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5033 uint32_t rflags = 0, delegation, recall = 0;
5034 fhandle_t fh;
5035 struct nfs_stateid dstateid;
5036 char sbuf[64], *s = sbuf;
5037 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5038 struct kauth_ace ace;
5039 vnode_t dvp = NULL;
5040 const char *vname = NULL;
5041 const char *name = NULL;
5042 size_t namelen;
5043 char smallname[128];
5044 char *filename = NULL;
5045 struct nfsreq_secinfo_args si;
5046
5047 nmp = NFSTONMP(np);
5048 if (nfs_mount_gone(nmp))
5049 return (ENXIO);
5050 nfsvers = nmp->nm_vers;
5051
5052 nfs_node_lock_force(np);
5053 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5054 /*
5055 * The node's been sillyrenamed, so we need to use
5056 * the sillyrename directory/name to do the open.
5057 */
5058 struct nfs_sillyrename *nsp = np->n_sillyrename;
5059 dvp = NFSTOV(nsp->nsr_dnp);
5060 if ((error = vnode_get(dvp))) {
5061 nfs_node_unlock(np);
5062 goto out;
5063 }
5064 name = nsp->nsr_name;
5065 } else {
5066 /*
5067 * [sigh] We can't trust VFS to get the parent right for named
5068 * attribute nodes. (It likes to reparent the nodes after we've
5069 * created them.) Luckily we can probably get the right parent
5070 * from the n_parent we have stashed away.
5071 */
5072 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5073 (((dvp = np->n_parent)) && (error = vnode_get(dvp))))
5074 dvp = NULL;
5075 if (!dvp)
5076 dvp = vnode_getparent(NFSTOV(np));
5077 vname = vnode_getname(NFSTOV(np));
5078 if (!dvp || !vname) {
5079 if (!error)
5080 error = EIO;
5081 nfs_node_unlock(np);
5082 goto out;
5083 }
5084 name = vname;
5085 }
5086 filename = &smallname[0];
5087 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5088 if (namelen >= sizeof(smallname)) {
5089 MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK);
5090 if (!filename) {
5091 error = ENOMEM;
5092 nfs_node_unlock(np);
5093 goto out;
5094 }
5095 snprintf(filename, namelen+1, "%s", name);
5096 }
5097 nfs_node_unlock(np);
5098
5099 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5100 goto out;
5101 NVATTR_INIT(&nvattr);
5102 delegation = NFS_OPEN_DELEGATE_NONE;
5103 dstateid = np->n_dstateid;
5104 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5105
5106 nfsm_chain_null(&nmreq);
5107 nfsm_chain_null(&nmrep);
5108
5109 // PUTFH, OPEN, GETATTR(FH)
5110 numops = 3;
5111 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5112 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5113 numops--;
5114 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5115 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5116 numops--;
5117 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5118 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5119 nfsm_chain_add_32(error, &nmreq, share_access);
5120 nfsm_chain_add_32(error, &nmreq, share_deny);
5121 // open owner: clientid + uid
5122 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5123 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5124 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5125 // openflag4
5126 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5127 // open_claim4
5128 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5129 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5130 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5131 numops--;
5132 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5133 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5134 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5135 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5136 nfsm_chain_build_done(error, &nmreq);
5137 nfsm_assert(error, (numops == 0), EPROTO);
5138 nfsmout_if(error);
5139
5140 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5141 noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5142
5143 if ((lockerror = nfs_node_lock(np)))
5144 error = lockerror;
5145 nfsm_chain_skip_tag(error, &nmrep);
5146 nfsm_chain_get_32(error, &nmrep, numops);
5147 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5148 nfsmout_if(error);
5149 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5150 nfs_owner_seqid_increment(noop, NULL, error);
5151 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5152 nfsm_chain_check_change_info(error, &nmrep, np);
5153 nfsm_chain_get_32(error, &nmrep, rflags);
5154 bmlen = NFS_ATTR_BITMAP_LEN;
5155 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5156 nfsm_chain_get_32(error, &nmrep, delegation);
5157 if (!error)
5158 switch (delegation) {
5159 case NFS_OPEN_DELEGATE_NONE:
5160 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5161 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5162 break;
5163 case NFS_OPEN_DELEGATE_READ:
5164 case NFS_OPEN_DELEGATE_WRITE:
5165 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5166 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5167 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5168 (delegation == NFS_OPEN_DELEGATE_READ)))
5169 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5170 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5171 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5172 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5173 nfsm_chain_get_32(error, &nmrep, recall);
5174 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
5175 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5176 /* if we have any trouble accepting the ACE, just invalidate it */
5177 ace_type = ace_flags = ace_mask = len = 0;
5178 nfsm_chain_get_32(error, &nmrep, ace_type);
5179 nfsm_chain_get_32(error, &nmrep, ace_flags);
5180 nfsm_chain_get_32(error, &nmrep, ace_mask);
5181 nfsm_chain_get_32(error, &nmrep, len);
5182 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5183 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5184 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5185 if (!error && (len >= slen)) {
5186 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5187 if (s)
5188 slen = len+1;
5189 else
5190 ace.ace_flags = 0;
5191 }
5192 if (s)
5193 nfsm_chain_get_opaque(error, &nmrep, len, s);
5194 else
5195 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5196 if (!error && s) {
5197 s[len] = '\0';
5198 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5199 ace.ace_flags = 0;
5200 }
5201 if (error || !s)
5202 ace.ace_flags = 0;
5203 if (s && (s != sbuf))
5204 FREE(s, M_TEMP);
5205 if (!error) {
5206 /* stuff the latest delegation state in the node */
5207 lck_mtx_lock(&np->n_openlock);
5208 np->n_openflags &= ~N_DELEG_MASK;
5209 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5210 np->n_dstateid = dstateid;
5211 np->n_dace = ace;
5212 if (np->n_dlink.tqe_next == NFSNOLIST) {
5213 lck_mtx_lock(&nmp->nm_lock);
5214 if (np->n_dlink.tqe_next == NFSNOLIST)
5215 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5216 lck_mtx_unlock(&nmp->nm_lock);
5217 }
5218 lck_mtx_unlock(&np->n_openlock);
5219 }
5220 break;
5221 default:
5222 error = EBADRPC;
5223 break;
5224 }
5225 nfsmout_if(error);
5226 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5227 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5228 nfsmout_if(error);
5229 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5230 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5231 error = EBADRPC;
5232 goto nfsmout;
5233 }
5234 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5235 // XXX what if fh doesn't match the vnode we think we're re-opening?
5236 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
5237 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5238 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5239 }
5240 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5241 nfsmout_if(error);
5242 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5243 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5244 nfsmout:
5245 NVATTR_CLEANUP(&nvattr);
5246 nfsm_chain_cleanup(&nmreq);
5247 nfsm_chain_cleanup(&nmrep);
5248 if (!lockerror)
5249 nfs_node_unlock(np);
5250 nfs_open_owner_clear_busy(noop);
5251 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5252 if (recall) {
5253 /*
5254 * We're making a delegated claim.
5255 * Don't return the delegation here in case we have more to claim.
5256 * Just make sure it's queued up to be returned.
5257 */
5258 nfs4_delegation_return_enqueue(np);
5259 }
5260 }
5261 out:
5262 // if (!error)
5263 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5264 if (filename && (filename != &smallname[0]))
5265 FREE(filename, M_TEMP);
5266 if (vname)
5267 vnode_putname(vname);
5268 if (dvp != NULLVP)
5269 vnode_put(dvp);
5270 return (error);
5271 }
5272
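/*
 * Both delegation reply parsers above read the variable-length ACE "who"
 * string into a small stack buffer, falling back to a heap buffer only
 * when the server's string doesn't fit.  A minimal sketch of the idiom
 * (read_opaque() is a hypothetical stand-in for the nfsm chain reader):
 *
 *	char sbuf[64], *s = sbuf;
 *	uint32_t slen = sizeof(sbuf);
 *	...
 *	if (len >= slen) {			// won't fit (need room for the NUL)?
 *		MALLOC(s, char *, len + 1, M_TEMP, M_WAITOK);
 *		if (s)
 *			slen = len + 1;
 *	}
 *	if (s) {
 *		read_opaque(s, len);		// hypothetical: pull len bytes off the wire
 *		s[len] = '\0';			// NUL-terminate for nfs4_id2guid()
 *	}
 *	if (s && (s != sbuf))
 *		FREE(s, M_TEMP);		// free only what we allocated
 *
 * On any failure the code simply zeroes ace.ace_flags, per the "just
 * invalidate it" comment, rather than failing the whole OPEN.
 */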
5273 /*
5274 * Send an OPEN RPC to reclaim an open file.
5275 */
5276 int
5277 nfs4_open_reclaim_rpc(
5278 struct nfs_open_file *nofp,
5279 int share_access,
5280 int share_deny)
5281 {
5282 struct nfsmount *nmp;
5283 struct nfs_open_owner *noop = nofp->nof_owner;
5284 struct nfs_vattr nvattr;
5285 int error = 0, lockerror = ENOENT, status;
5286 int nfsvers, numops;
5287 u_int64_t xid;
5288 nfsnode_t np = nofp->nof_np;
5289 struct nfsm_chain nmreq, nmrep;
5290 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5291 uint32_t rflags = 0, delegation, recall = 0;
5292 fhandle_t fh;
5293 struct nfs_stateid dstateid;
5294 char sbuf[64], *s = sbuf;
5295 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5296 struct kauth_ace ace;
5297 struct nfsreq_secinfo_args si;
5298
5299 nmp = NFSTONMP(np);
5300 if (nfs_mount_gone(nmp))
5301 return (ENXIO);
5302 nfsvers = nmp->nm_vers;
5303
5304 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5305 return (error);
5306
5307 NVATTR_INIT(&nvattr);
5308 delegation = NFS_OPEN_DELEGATE_NONE;
5309 dstateid = np->n_dstateid;
5310 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5311
5312 nfsm_chain_null(&nmreq);
5313 nfsm_chain_null(&nmrep);
5314
5315 // PUTFH, OPEN, GETATTR(FH)
5316 numops = 3;
5317 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5318 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5319 numops--;
5320 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5321 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5322 numops--;
5323 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5324 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5325 nfsm_chain_add_32(error, &nmreq, share_access);
5326 nfsm_chain_add_32(error, &nmreq, share_deny);
5327 // open owner: clientid + uid
5328 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5329 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5330 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5331 // openflag4
5332 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5333 // open_claim4
5334 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5335 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5336 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5337 NFS_OPEN_DELEGATE_NONE;
5338 nfsm_chain_add_32(error, &nmreq, delegation);
5339 delegation = NFS_OPEN_DELEGATE_NONE;
5340 numops--;
5341 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5342 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5343 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5344 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5345 nfsm_chain_build_done(error, &nmreq);
5346 nfsm_assert(error, (numops == 0), EPROTO);
5347 nfsmout_if(error);
5348
5349 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5350 noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status);
5351
5352 if ((lockerror = nfs_node_lock(np)))
5353 error = lockerror;
5354 nfsm_chain_skip_tag(error, &nmrep);
5355 nfsm_chain_get_32(error, &nmrep, numops);
5356 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5357 nfsmout_if(error);
5358 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5359 nfs_owner_seqid_increment(noop, NULL, error);
5360 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5361 nfsm_chain_check_change_info(error, &nmrep, np);
5362 nfsm_chain_get_32(error, &nmrep, rflags);
5363 bmlen = NFS_ATTR_BITMAP_LEN;
5364 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5365 nfsm_chain_get_32(error, &nmrep, delegation);
5366 if (!error)
5367 switch (delegation) {
5368 case NFS_OPEN_DELEGATE_NONE:
5369 if (np->n_openflags & N_DELEG_MASK) {
5370 /*
5371 * Hey! We were supposed to get our delegation back even
5372 * if it was getting immediately recalled. Bad server!
5373 *
5374 * Just try to return the existing delegation.
5375 */
5376 // NP(np, "nfs: open reclaim didn't return delegation?");
5377 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5378 recall = 1;
5379 }
5380 break;
5381 case NFS_OPEN_DELEGATE_READ:
5382 case NFS_OPEN_DELEGATE_WRITE:
5383 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5384 nfsm_chain_get_32(error, &nmrep, recall);
5385 if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX
5386 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5387 /* if we have any trouble accepting the ACE, just invalidate it */
5388 ace_type = ace_flags = ace_mask = len = 0;
5389 nfsm_chain_get_32(error, &nmrep, ace_type);
5390 nfsm_chain_get_32(error, &nmrep, ace_flags);
5391 nfsm_chain_get_32(error, &nmrep, ace_mask);
5392 nfsm_chain_get_32(error, &nmrep, len);
5393 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5394 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5395 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5396 if (!error && (len >= slen)) {
5397 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
5398 if (s)
5399 slen = len+1;
5400 else
5401 ace.ace_flags = 0;
5402 }
5403 if (s)
5404 nfsm_chain_get_opaque(error, &nmrep, len, s);
5405 else
5406 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5407 if (!error && s) {
5408 s[len] = '\0';
5409 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
5410 ace.ace_flags = 0;
5411 }
5412 if (error || !s)
5413 ace.ace_flags = 0;
5414 if (s && (s != sbuf))
5415 FREE(s, M_TEMP);
5416 if (!error) {
5417 /* stuff the delegation state in the node */
5418 lck_mtx_lock(&np->n_openlock);
5419 np->n_openflags &= ~N_DELEG_MASK;
5420 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5421 np->n_dstateid = dstateid;
5422 np->n_dace = ace;
5423 if (np->n_dlink.tqe_next == NFSNOLIST) {
5424 lck_mtx_lock(&nmp->nm_lock);
5425 if (np->n_dlink.tqe_next == NFSNOLIST)
5426 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5427 lck_mtx_unlock(&nmp->nm_lock);
5428 }
5429 lck_mtx_unlock(&np->n_openlock);
5430 }
5431 break;
5432 default:
5433 error = EBADRPC;
5434 break;
5435 }
5436 nfsmout_if(error);
5437 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5438 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5439 nfsmout_if(error);
5440 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5441 NP(np, "nfs: open reclaim didn't return filehandle?");
5442 error = EBADRPC;
5443 goto nfsmout;
5444 }
5445 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5446 // XXX what if fh doesn't match the vnode we think we're re-opening?
5447 // That should be pretty hard in this case, given that we are doing
5448 // the open reclaim using the file handle (and not a dir/name pair).
5449 // Solaris Named Attributes may do this due to a bug, so don't warn for named attributes.
5450 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
5451 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5452 }
5453 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5454 nfsmout_if(error);
5455 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
5456 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5457 nfsmout:
5458 // if (!error)
5459 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5460 NVATTR_CLEANUP(&nvattr);
5461 nfsm_chain_cleanup(&nmreq);
5462 nfsm_chain_cleanup(&nmrep);
5463 if (!lockerror)
5464 nfs_node_unlock(np);
5465 nfs_open_owner_clear_busy(noop);
5466 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5467 if (recall)
5468 nfs4_delegation_return_enqueue(np);
5469 }
5470 return (error);
5471 }
5472
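/*
 * For NFS_CLAIM_PREVIOUS the client must tell the server which delegation
 * type it held before the server restarted.  The mapping encoded inline
 * above, written out as a helper for clarity (a sketch, not in the source):
 *
 *	static uint32_t
 *	deleg_type_held(nfsnode_t np)
 *	{
 *		if (np->n_openflags & N_DELEG_READ)
 *			return (NFS_OPEN_DELEGATE_READ);
 *		if (np->n_openflags & N_DELEG_WRITE)
 *			return (NFS_OPEN_DELEGATE_WRITE);
 *		return (NFS_OPEN_DELEGATE_NONE);
 *	}
 *
 * Note that "delegation" is reset to NFS_OPEN_DELEGATE_NONE right after
 * being encoded, so the request-side use can't leak into the reply parsing.
 */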
5473 int
5474 nfs4_open_downgrade_rpc(
5475 nfsnode_t np,
5476 struct nfs_open_file *nofp,
5477 vfs_context_t ctx)
5478 {
5479 struct nfs_open_owner *noop = nofp->nof_owner;
5480 struct nfsmount *nmp;
5481 int error, lockerror = ENOENT, status, nfsvers, numops;
5482 struct nfsm_chain nmreq, nmrep;
5483 u_int64_t xid;
5484 struct nfsreq_secinfo_args si;
5485
5486 nmp = NFSTONMP(np);
5487 if (nfs_mount_gone(nmp))
5488 return (ENXIO);
5489 nfsvers = nmp->nm_vers;
5490
5491 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5492 return (error);
5493
5494 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5495 nfsm_chain_null(&nmreq);
5496 nfsm_chain_null(&nmrep);
5497
5498 // PUTFH, OPEN_DOWNGRADE, GETATTR
5499 numops = 3;
5500 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5501 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5502 numops--;
5503 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5504 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5505 numops--;
5506 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5507 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5508 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5509 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5510 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5511 numops--;
5512 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5513 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5514 nfsm_chain_build_done(error, &nmreq);
5515 nfsm_assert(error, (numops == 0), EPROTO);
5516 nfsmout_if(error);
5517 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5518 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5519 &si, R_NOINTR, &nmrep, &xid, &status);
5520
5521 if ((lockerror = nfs_node_lock(np)))
5522 error = lockerror;
5523 nfsm_chain_skip_tag(error, &nmrep);
5524 nfsm_chain_get_32(error, &nmrep, numops);
5525 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5526 nfsmout_if(error);
5527 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
5528 nfs_owner_seqid_increment(noop, NULL, error);
5529 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5530 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5531 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5532 nfsmout:
5533 if (!lockerror)
5534 nfs_node_unlock(np);
5535 nfs_open_owner_clear_busy(noop);
5536 nfsm_chain_cleanup(&nmreq);
5537 nfsm_chain_cleanup(&nmrep);
5538 return (error);
5539 }
5540
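/*
 * Note on the OPEN_DOWNGRADE encoding above: the RPC carries the access
 * and deny modes the open file is keeping (nofp->nof_access/nof_deny,
 * which the caller has presumably already trimmed), not the modes being
 * dropped, and the server's reply supplies a replacement stateid that is
 * stored back into nofp->nof_stateid.
 */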
5541 int
5542 nfs4_close_rpc(
5543 nfsnode_t np,
5544 struct nfs_open_file *nofp,
5545 thread_t thd,
5546 kauth_cred_t cred,
5547 int flags)
5548 {
5549 struct nfs_open_owner *noop = nofp->nof_owner;
5550 struct nfsmount *nmp;
5551 int error, lockerror = ENOENT, status, nfsvers, numops;
5552 struct nfsm_chain nmreq, nmrep;
5553 u_int64_t xid;
5554 struct nfsreq_secinfo_args si;
5555
5556 nmp = NFSTONMP(np);
5557 if (nfs_mount_gone(nmp))
5558 return (ENXIO);
5559 nfsvers = nmp->nm_vers;
5560
5561 if ((error = nfs_open_owner_set_busy(noop, NULL)))
5562 return (error);
5563
5564 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5565 nfsm_chain_null(&nmreq);
5566 nfsm_chain_null(&nmrep);
5567
5568 // PUTFH, CLOSE, GETATTR
5569 numops = 3;
5570 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5571 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
5572 numops--;
5573 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5574 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5575 numops--;
5576 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
5577 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5578 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5579 numops--;
5580 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5581 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5582 nfsm_chain_build_done(error, &nmreq);
5583 nfsm_assert(error, (numops == 0), EPROTO);
5584 nfsmout_if(error);
5585 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status);
5586
5587 if ((lockerror = nfs_node_lock(np)))
5588 error = lockerror;
5589 nfsm_chain_skip_tag(error, &nmrep);
5590 nfsm_chain_get_32(error, &nmrep, numops);
5591 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5592 nfsmout_if(error);
5593 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
5594 nfs_owner_seqid_increment(noop, NULL, error);
5595 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5596 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5597 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5598 nfsmout:
5599 if (!lockerror)
5600 nfs_node_unlock(np);
5601 nfs_open_owner_clear_busy(noop);
5602 nfsm_chain_cleanup(&nmreq);
5603 nfsm_chain_cleanup(&nmrep);
5604 return (error);
5605 }
5606
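/*
 * Note on the CLOSE encoding above: per the CLOSE XDR the open owner's
 * seqid precedes the stateid (OPEN_DOWNGRADE uses the opposite order),
 * and nfs_owner_seqid_increment() is handed the reply's error so it can
 * account for whether the server actually consumed this seqid.
 */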
5607
5608 /*
5609 * Claim the delegated open combinations this open file holds.
5610 */
5611 int
5612 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
5613 {
5614 struct nfs_open_owner *noop = nofp->nof_owner;
5615 struct nfs_lock_owner *nlop;
5616 struct nfs_file_lock *nflp, *nextnflp;
5617 struct nfsmount *nmp;
5618 int error = 0, reopen = 0;
5619
5620 if (nofp->nof_d_rw_drw) {
5621 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
5622 if (!error) {
5623 lck_mtx_lock(&nofp->nof_lock);
5624 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
5625 nofp->nof_d_rw_drw = 0;
5626 lck_mtx_unlock(&nofp->nof_lock);
5627 }
5628 }
5629 if (!error && nofp->nof_d_w_drw) {
5630 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
5631 if (!error) {
5632 lck_mtx_lock(&nofp->nof_lock);
5633 nofp->nof_w_drw += nofp->nof_d_w_drw;
5634 nofp->nof_d_w_drw = 0;
5635 lck_mtx_unlock(&nofp->nof_lock);
5636 }
5637 }
5638 if (!error && nofp->nof_d_r_drw) {
5639 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
5640 if (!error) {
5641 lck_mtx_lock(&nofp->nof_lock);
5642 nofp->nof_r_drw += nofp->nof_d_r_drw;
5643 nofp->nof_d_r_drw = 0;
5644 lck_mtx_unlock(&nofp->nof_lock);
5645 }
5646 }
5647 if (!error && nofp->nof_d_rw_dw) {
5648 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
5649 if (!error) {
5650 lck_mtx_lock(&nofp->nof_lock);
5651 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
5652 nofp->nof_d_rw_dw = 0;
5653 lck_mtx_unlock(&nofp->nof_lock);
5654 }
5655 }
5656 if (!error && nofp->nof_d_w_dw) {
5657 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
5658 if (!error) {
5659 lck_mtx_lock(&nofp->nof_lock);
5660 nofp->nof_w_dw += nofp->nof_d_w_dw;
5661 nofp->nof_d_w_dw = 0;
5662 lck_mtx_unlock(&nofp->nof_lock);
5663 }
5664 }
5665 if (!error && nofp->nof_d_r_dw) {
5666 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
5667 if (!error) {
5668 lck_mtx_lock(&nofp->nof_lock);
5669 nofp->nof_r_dw += nofp->nof_d_r_dw;
5670 nofp->nof_d_r_dw = 0;
5671 lck_mtx_unlock(&nofp->nof_lock);
5672 }
5673 }
5674 /* non-deny-mode opens may be reopened if no locks are held */
5675 if (!error && nofp->nof_d_rw) {
5676 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
5677 /* for some errors, we should just try reopening the file */
5678 if (nfs_mount_state_error_delegation_lost(error))
5679 reopen = error;
5680 if (!error || reopen) {
5681 lck_mtx_lock(&nofp->nof_lock);
5682 nofp->nof_rw += nofp->nof_d_rw;
5683 nofp->nof_d_rw = 0;
5684 lck_mtx_unlock(&nofp->nof_lock);
5685 }
5686 }
5687 /* if we've already set reopen, we should move these other two opens from delegated to not delegated */
5688 if ((!error || reopen) && nofp->nof_d_w) {
5689 if (!error) {
5690 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
5691 /* for some errors, we should just try reopening the file */
5692 if (nfs_mount_state_error_delegation_lost(error))
5693 reopen = error;
5694 }
5695 if (!error || reopen) {
5696 lck_mtx_lock(&nofp->nof_lock);
5697 nofp->nof_w += nofp->nof_d_w;
5698 nofp->nof_d_w = 0;
5699 lck_mtx_unlock(&nofp->nof_lock);
5700 }
5701 }
5702 if ((!error || reopen) && nofp->nof_d_r) {
5703 if (!error) {
5704 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
5705 /* for some errors, we should just try reopening the file */
5706 if (nfs_mount_state_error_delegation_lost(error))
5707 reopen = error;
5708 }
5709 if (!error || reopen) {
5710 lck_mtx_lock(&nofp->nof_lock);
5711 nofp->nof_r += nofp->nof_d_r;
5712 nofp->nof_d_r = 0;
5713 lck_mtx_unlock(&nofp->nof_lock);
5714 }
5715 }
5716
5717 if (reopen) {
5718 /*
5719 * Any problems with the delegation probably indicate that we
5720 * should review/return all of our current delegation state.
5721 */
5722 if ((nmp = NFSTONMP(nofp->nof_np))) {
5723 nfs4_delegation_return_enqueue(nofp->nof_np);
5724 lck_mtx_lock(&nmp->nm_lock);
5725 nfs_need_recover(nmp, NFSERR_EXPIRED);
5726 lck_mtx_unlock(&nmp->nm_lock);
5727 }
5728 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
5729 /* just reopen the file on next access */
5730 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
5731 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5732 lck_mtx_lock(&nofp->nof_lock);
5733 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
5734 lck_mtx_unlock(&nofp->nof_lock);
5735 return (0);
5736 }
5737 if (reopen)
5738 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
5739 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5740 }
5741
5742 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
5743 /* claim delegated locks */
5744 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
5745 if (nlop->nlo_open_owner != noop)
5746 continue;
5747 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
5748 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
5749 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5750 continue;
5751 /* skip non-delegated locks */
5752 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5753 continue;
5754 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
5755 if (error) {
5756 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
5757 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5758 break;
5759 }
5760 // else {
5761 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
5762 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5763 // }
5764 }
5765 if (error)
5766 break;
5767 }
5768 }
5769
5770 if (!error) /* all state claimed successfully! */
5771 return (0);
5772
5773 /* restart if it looks like a problem more than just losing the delegation */
5774 if (!nfs_mount_state_error_delegation_lost(error) &&
5775 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
5776 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5777 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np))))
5778 nfs_need_reconnect(nmp);
5779 return (error);
5780 }
5781
5782 /* delegated state lost (once held but now not claimable) */
5783 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
5784
5785 /*
5786 * Any problems with the delegation probably indicate that we
5787 * should review/return all of our current delegation state.
5788 */
5789 if ((nmp = NFSTONMP(nofp->nof_np))) {
5790 nfs4_delegation_return_enqueue(nofp->nof_np);
5791 lck_mtx_lock(&nmp->nm_lock);
5792 nfs_need_recover(nmp, NFSERR_EXPIRED);
5793 lck_mtx_unlock(&nmp->nm_lock);
5794 }
5795
5796 /* revoke all open file state */
5797 nfs_revoke_open_state_for_node(nofp->nof_np);
5798
5799 return (error);
5800 }
5801
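/*
 * For reference, the nine delegated open-count buckets walked above (in
 * order of decreasing deny mode) are named nof_d_<access>_<deny>, with
 * access r/w/rw and deny none/dw/drw:
 *
 *	deny both:	nof_d_rw_drw	nof_d_w_drw	nof_d_r_drw
 *	deny write:	nof_d_rw_dw	nof_d_w_dw	nof_d_r_dw
 *	deny none:	nof_d_rw	nof_d_w		nof_d_r
 *
 * A successful claim (or a decision to fall back to reopen) folds each
 * delegated count into its non-delegated twin (e.g. nof_d_rw -> nof_rw).
 * Only the deny-none buckets are eligible for the reopen fallback, per
 * the "non-deny-mode opens" comment above.
 */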
5802 /*
5803 * Release all open state for the given node.
5804 */
5805 void
5806 nfs_release_open_state_for_node(nfsnode_t np, int force)
5807 {
5808 struct nfsmount *nmp = NFSTONMP(np);
5809 struct nfs_open_file *nofp;
5810 struct nfs_file_lock *nflp, *nextnflp;
5811
5812 /* drop held locks */
5813 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
5814 /* skip dead & blocked lock requests */
5815 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED))
5816 continue;
5817 /* send an unlock if not a delegated lock */
5818 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED))
5819 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
5820 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
5821 /* kill/remove the lock */
5822 lck_mtx_lock(&np->n_openlock);
5823 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
5824 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
5825 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
5826 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
5827 if (nflp->nfl_blockcnt) {
5828 /* wake up anyone blocked on this lock */
5829 wakeup(nflp);
5830 } else {
5831 /* remove nflp from lock list and destroy */
5832 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
5833 nfs_file_lock_destroy(nflp);
5834 }
5835 lck_mtx_unlock(&np->n_openlock);
5836 }
5837
5838 lck_mtx_lock(&np->n_openlock);
5839
5840 /* drop all opens */
5841 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5842 if (nofp->nof_flags & NFS_OPEN_FILE_LOST)
5843 continue;
5844 /* mark open state as lost */
5845 lck_mtx_lock(&nofp->nof_lock);
5846 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
5847 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
5848
5849 lck_mtx_unlock(&nofp->nof_lock);
5850 if (!force && nmp && (nmp->nm_vers >= NFS_VER4))
5851 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
5852 }
5853
5854 lck_mtx_unlock(&np->n_openlock);
5855 }
5856
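/*
 * The "force" argument above suppresses all over-the-wire cleanup: no
 * unlock RPCs for non-delegated locks and no CLOSE RPCs for the opens,
 * so only the local bookkeeping is torn down.  Callers presumably pass
 * force when the server-side state is already gone or unreachable.
 */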
5857 /*
5858 * State for a node has been lost, drop it, and revoke the node.
5859 * Attempt to return any state if possible in case the server
5860 * might somehow think we hold it.
5861 */
5862 void
5863 nfs_revoke_open_state_for_node(nfsnode_t np)
5864 {
5865 struct nfsmount *nmp;
5866
5867 /* mark node as needing to be revoked */
5868 nfs_node_lock_force(np);
5869 if (np->n_flag & NREVOKE) {
5870 /* already revoked? */
5871 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
5872 nfs_node_unlock(np);
5873 return;
5874 }
5875 np->n_flag |= NREVOKE;
5876 nfs_node_unlock(np);
5877
5878 nfs_release_open_state_for_node(np, 0);
5879 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
5880
5881 /* mark mount as needing a revoke scan and have the socket thread do it. */
5882 if ((nmp = NFSTONMP(np))) {
5883 lck_mtx_lock(&nmp->nm_lock);
5884 nmp->nm_state |= NFSSTA_REVOKE;
5885 nfs_mount_sock_thread_wake(nmp);
5886 lck_mtx_unlock(&nmp->nm_lock);
5887 }
5888 }
5889
5890 /*
5891 * Claim the delegated open combinations that each of this node's open files hold.
5892 */
5893 int
5894 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
5895 {
5896 struct nfs_open_file *nofp;
5897 int error = 0;
5898
5899 lck_mtx_lock(&np->n_openlock);
5900
5901 /* walk the open file list looking for opens with delegated state to claim */
5902 restart:
5903 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
5904 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
5905 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
5906 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r)
5907 continue;
5908 lck_mtx_unlock(&np->n_openlock);
5909 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
5910 lck_mtx_lock(&np->n_openlock);
5911 if (error)
5912 break;
5913 goto restart;
5914 }
5915
5916 lck_mtx_unlock(&np->n_openlock);
5917
5918 return (error);
5919 }
5920
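/*
 * The restart loop above is the usual shape for mutating a list whose
 * lock must be dropped for each work item: rescan from the head instead
 * of trusting a saved iterator.  A minimal sketch with placeholder names:
 *
 *	lck_mtx_lock(&lock);
 * restart:
 *	TAILQ_FOREACH(entry, &list, link) {
 *		if (!needs_work(entry))		// hypothetical predicate
 *			continue;
 *		lck_mtx_unlock(&lock);		// can't hold a mutex across an RPC
 *		error = do_work(entry);		// hypothetical RPC-issuing helper
 *		lck_mtx_lock(&lock);
 *		if (error)
 *			break;
 *		goto restart;			// list may have changed; rescan
 *	}
 *	lck_mtx_unlock(&lock);
 */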
5921 /*
5922 * Mark a node as needed to have its delegation returned.
5923 * Queue it up on the delegation return queue.
5924 * Make sure the thread is running.
5925 */
5926 void
5927 nfs4_delegation_return_enqueue(nfsnode_t np)
5928 {
5929 struct nfsmount *nmp;
5930
5931 nmp = NFSTONMP(np);
5932 if (nfs_mount_gone(nmp))
5933 return;
5934
5935 lck_mtx_lock(&np->n_openlock);
5936 np->n_openflags |= N_DELEG_RETURN;
5937 lck_mtx_unlock(&np->n_openlock);
5938
5939 lck_mtx_lock(&nmp->nm_lock);
5940 if (np->n_dreturn.tqe_next == NFSNOLIST)
5941 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
5942 nfs_mount_sock_thread_wake(nmp);
5943 lck_mtx_unlock(&nmp->nm_lock);
5944 }
5945
5946 /*
5947 * return any delegation we may have for the given node
5948 */
5949 int
5950 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
5951 {
5952 struct nfsmount *nmp;
5953 fhandle_t fh;
5954 nfs_stateid dstateid;
5955 int error;
5956
5957 nmp = NFSTONMP(np);
5958 if (nfs_mount_gone(nmp))
5959 return (ENXIO);
5960
5961 /* first, make sure the node's marked for delegation return */
5962 lck_mtx_lock(&np->n_openlock);
5963 np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING);
5964 lck_mtx_unlock(&np->n_openlock);
5965
5966 /* make sure nobody else is using the delegation state */
5967 if ((error = nfs_open_state_set_busy(np, NULL)))
5968 goto out;
5969
5970 /* claim any delegated state */
5971 if ((error = nfs4_claim_delegated_state_for_node(np, flags)))
5972 goto out;
5973
5974 /* return the delegation */
5975 lck_mtx_lock(&np->n_openlock);
5976 dstateid = np->n_dstateid;
5977 fh.fh_len = np->n_fhsize;
5978 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
5979 lck_mtx_unlock(&np->n_openlock);
5980 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
5981 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
5982 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
5983 lck_mtx_lock(&np->n_openlock);
5984 np->n_openflags &= ~N_DELEG_MASK;
5985 lck_mtx_lock(&nmp->nm_lock);
5986 if (np->n_dlink.tqe_next != NFSNOLIST) {
5987 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
5988 np->n_dlink.tqe_next = NFSNOLIST;
5989 }
5990 lck_mtx_unlock(&nmp->nm_lock);
5991 lck_mtx_unlock(&np->n_openlock);
5992 }
5993
5994 out:
5995 /* make sure it's no longer on the return queue and clear the return flags */
5996 lck_mtx_lock(&nmp->nm_lock);
5997 if (np->n_dreturn.tqe_next != NFSNOLIST) {
5998 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
5999 np->n_dreturn.tqe_next = NFSNOLIST;
6000 }
6001 lck_mtx_unlock(&nmp->nm_lock);
6002 lck_mtx_lock(&np->n_openlock);
6003 np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING);
6004 lck_mtx_unlock(&np->n_openlock);
6005
6006 if (error) {
6007 NP(np, "nfs4_delegation_return, error %d", error);
6008 if (error == ETIMEDOUT)
6009 nfs_need_reconnect(nmp);
6010 if (nfs_mount_state_error_should_restart(error)) {
6011 /* make sure recovery happens */
6012 lck_mtx_lock(&nmp->nm_lock);
6013 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6014 lck_mtx_unlock(&nmp->nm_lock);
6015 }
6016 }
6017
6018 nfs_open_state_clear_busy(np);
6019
6020 return (error);
6021 }
6022
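/*
 * Summary of nfs4_delegation_return(): mark the node as returning, busy
 * the open state, claim any opens/locks still riding on the delegation,
 * then send DELEGRETURN.  The delegation is treated as gone on every
 * outcome except ETIMEDOUT and NFSERR_(LEASE_)MOVED, where the server
 * may still believe we hold it, so local state is kept for a retry.
 */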
6023 /*
6024 * RPC to return a delegation for a file handle
6025 */
6026 int
6027 nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6028 {
6029 int error = 0, status, numops;
6030 uint64_t xid;
6031 struct nfsm_chain nmreq, nmrep;
6032 struct nfsreq_secinfo_args si;
6033
6034 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6035 nfsm_chain_null(&nmreq);
6036 nfsm_chain_null(&nmrep);
6037
6038 // PUTFH, DELEGRETURN
6039 numops = 2;
6040 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6041 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6042 numops--;
6043 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6044 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6045 numops--;
6046 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6047 nfsm_chain_add_stateid(error, &nmreq, sid);
6048 nfsm_chain_build_done(error, &nmreq);
6049 nfsm_assert(error, (numops == 0), EPROTO);
6050 nfsmout_if(error);
6051 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6052 nfsm_chain_skip_tag(error, &nmrep);
6053 nfsm_chain_get_32(error, &nmrep, numops);
6054 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6055 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6056 nfsmout:
6057 nfsm_chain_cleanup(&nmreq);
6058 nfsm_chain_cleanup(&nmrep);
6059 return (error);
6060 }
6061
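/*
 * For reference, the request built above is a minimal two-op compound:
 *
 *	COMPOUND "delegreturn"
 *	  PUTFH       <fh>
 *	  DELEGRETURN <stateid>
 *
 * It is keyed by raw file handle rather than nfsnode, which lets callers
 * return a delegation for a file handle they no longer have a node for
 * (see the fh-only delegation return in the open cleanup path above).
 */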
6062
6063 /*
6064 * NFS read call.
6065 * Just call nfs_bioread() to do the work.
6066 *
6067 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6068 * without first calling VNOP_OPEN, so we make sure the file is open here.
6069 */
6070 int
6071 nfs_vnop_read(
6072 struct vnop_read_args /* {
6073 struct vnodeop_desc *a_desc;
6074 vnode_t a_vp;
6075 struct uio *a_uio;
6076 int a_ioflag;
6077 vfs_context_t a_context;
6078 } */ *ap)
6079 {
6080 vnode_t vp = ap->a_vp;
6081 vfs_context_t ctx = ap->a_context;
6082 nfsnode_t np;
6083 struct nfsmount *nmp;
6084 struct nfs_open_owner *noop;
6085 struct nfs_open_file *nofp;
6086 int error;
6087
6088 if (vnode_vtype(ap->a_vp) != VREG)
6089 return ((vnode_vtype(vp) == VDIR) ? EISDIR : EPERM);
6090
6091 np = VTONFS(vp);
6092 nmp = NFSTONMP(np);
6093 if (nfs_mount_gone(nmp))
6094 return (ENXIO);
6095 if (np->n_flag & NREVOKE)
6096 return (EIO);
6097
6098 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6099 if (!noop)
6100 return (ENOMEM);
6101 restart:
6102 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6103 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6104 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6105 error = EIO;
6106 }
6107 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6108 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6109 nofp = NULL;
6110 if (!error)
6111 goto restart;
6112 }
6113 if (error) {
6114 nfs_open_owner_rele(noop);
6115 return (error);
6116 }
6117 /*
6118 * Since the read path is a hot path, if we already have
6119 * read access, let's go ahead and try the read without
6120 * busying the mount and open file node for this open owner.
6121 *
6122 * N.B. This is inherently racy w.r.t. an execve using
6123 * an already open file, in that the read at the end of
6124 * this routine will be racing with a potential close.
6125 * The code below ultimately has the same problem. In practice
6126 * this does not seem to be an issue.
6127 */
6128 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6129 nfs_open_owner_rele(noop);
6130 goto do_read;
6131 }
6132 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6133 if (error) {
6134 nfs_open_owner_rele(noop);
6135 return (error);
6136 }
6137 /*
6138 * If we don't have a file already open with the access we need (read), then
6139 * we need to open one. Otherwise we just co-opt an open. We might not already
6140 * have access because we're trying to read the first page of the
6141 * file for execve.
6142 */
6143 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6144 if (error) {
6145 nfs_mount_state_in_use_end(nmp, 0);
6146 nfs_open_owner_rele(noop);
6147 return (error);
6148 }
6149 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6150 /* we don't have the file open, so open it for read access if we're not denied */
6151 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6152 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6153 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6154 }
6155 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6156 nfs_open_file_clear_busy(nofp);
6157 nfs_mount_state_in_use_end(nmp, 0);
6158 nfs_open_owner_rele(noop);
6159 return (EPERM);
6160 }
6161 if (np->n_flag & NREVOKE) {
6162 error = EIO;
6163 nfs_open_file_clear_busy(nofp);
6164 nfs_mount_state_in_use_end(nmp, 0);
6165 nfs_open_owner_rele(noop);
6166 return (error);
6167 }
6168 if (nmp->nm_vers < NFS_VER4) {
6169 /* NFS v2/v3 opens are always allowed - so just add it. */
6170 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6171 } else {
6172 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6173 }
6174 if (!error)
6175 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6176 }
6177 if (nofp)
6178 nfs_open_file_clear_busy(nofp);
6179 if (nfs_mount_state_in_use_end(nmp, error)) {
6180 nofp = NULL;
6181 goto restart;
6182 }
6183 nfs_open_owner_rele(noop);
6184 if (error)
6185 return (error);
6186 do_read:
6187 return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
6188 }
6189
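/*
 * Summary of the open-for-read dance in nfs_vnop_read(): when read access
 * is already held the hot path takes no mount or open-file busy state at
 * all; otherwise the file is opened for read (a local share add on v2/v3,
 * an OPEN RPC on v4) and flagged NFS_OPEN_FILE_NEEDCLOSE so the close
 * happens later, since the exec paths noted above may never issue a
 * matching VNOP_OPEN/VNOP_CLOSE pair of their own.
 */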
6190 /*
6191 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6192 * Files are created using the NFSv4 OPEN RPC. So we must open the
6193 * file to create it and then close it.
6194 */
6195 int
6196 nfs4_vnop_create(
6197 struct vnop_create_args /* {
6198 struct vnodeop_desc *a_desc;
6199 vnode_t a_dvp;
6200 vnode_t *a_vpp;
6201 struct componentname *a_cnp;
6202 struct vnode_attr *a_vap;
6203 vfs_context_t a_context;
6204 } */ *ap)
6205 {
6206 vfs_context_t ctx = ap->a_context;
6207 struct componentname *cnp = ap->a_cnp;
6208 struct vnode_attr *vap = ap->a_vap;
6209 vnode_t dvp = ap->a_dvp;
6210 vnode_t *vpp = ap->a_vpp;
6211 struct nfsmount *nmp;
6212 nfsnode_t np;
6213 int error = 0, busyerror = 0, accessMode, denyMode;
6214 struct nfs_open_owner *noop = NULL;
6215 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6216
6217 nmp = VTONMP(dvp);
6218 if (nfs_mount_gone(nmp))
6219 return (ENXIO);
6220
6221 if (vap)
6222 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6223
6224 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6225 if (!noop)
6226 return (ENOMEM);
6227
6228 restart:
6229 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6230 if (error) {
6231 nfs_open_owner_rele(noop);
6232 return (error);
6233 }
6234
6235 /* grab a provisional, nodeless open file */
6236 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6237 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6238 printf("nfs_vnop_create: LOST\n");
6239 error = EIO;
6240 }
6241 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6242 /* This shouldn't happen given that this is a new, nodeless nofp */
6243 nfs_mount_state_in_use_end(nmp, 0);
6244 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6245 nfs_open_file_destroy(newnofp);
6246 newnofp = NULL;
6247 if (!error)
6248 goto restart;
6249 }
6250 if (!error)
6251 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6252 if (error) {
6253 if (newnofp)
6254 nfs_open_file_destroy(newnofp);
6255 newnofp = NULL;
6256 goto out;
6257 }
6258
6259 /*
6260 * We're just trying to create the file.
6261 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6262 */
6263 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6264 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6265
6266 /* Do the open/create */
6267 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6268 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6269 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6270 /*
6271 * Hmm... it looks like we may have a situation where the request was
6272 * retransmitted because we didn't get the first response, which had
6273 * successfully created/opened the file; the second time around the open
6274 * was denied because the mode the file was created with doesn't allow write access.
6275 *
6276 * We'll try to work around this by temporarily updating the mode and
6277 * retrying the open.
6278 */
6279 struct vnode_attr vattr;
6280
6281 /* first make sure it's there */
6282 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6283 if (!error2 && np) {
6284 nfs_node_unlock(np);
6285 *vpp = NFSTOV(np);
6286 if (vnode_vtype(NFSTOV(np)) == VREG) {
6287 VATTR_INIT(&vattr);
6288 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6289 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6290 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6291 VATTR_INIT(&vattr);
6292 VATTR_SET(&vattr, va_mode, vap->va_mode);
6293 nfs4_setattr_rpc(np, &vattr, ctx);
6294 if (!error2)
6295 error = 0;
6296 }
6297 }
6298 if (error) {
6299 vnode_put(*vpp);
6300 *vpp = NULL;
6301 }
6302 }
6303 }
6304 if (!error && !*vpp) {
6305 printf("nfs4_open_rpc returned without a node?\n");
6306 /* Hmmm... with no node, we have no filehandle and can't close it */
6307 error = EIO;
6308 }
6309 if (error) {
6310 /* need to cleanup our temporary nofp */
6311 nfs_open_file_clear_busy(newnofp);
6312 nfs_open_file_destroy(newnofp);
6313 newnofp = NULL;
6314 goto out;
6315 }
6316 /* After we have a node, add our open file struct to the node */
6317 np = VTONFS(*vpp);
6318 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6319 nofp = newnofp;
6320 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6321 if (error) {
6322 /* This shouldn't happen, because we passed in a new nofp to use. */
6323 printf("nfs_open_file_find_internal failed! %d\n", error);
6324 goto out;
6325 } else if (nofp != newnofp) {
6326 /*
6327 * Hmm... an open file struct already exists.
6328 * Mark the existing one busy and merge our open into it.
6329 * Then destroy the one we created.
6330 * Note: there's no chance of an open conflict because the
6331 * open has already been granted.
6332 */
6333 busyerror = nfs_open_file_set_busy(nofp, NULL);
6334 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6335 nofp->nof_stateid = newnofp->nof_stateid;
6336 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
6337 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6338 nfs_open_file_clear_busy(newnofp);
6339 nfs_open_file_destroy(newnofp);
6340 }
6341 newnofp = NULL;
6342 /* mark the node as holding a create-initiated open */
6343 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6344 nofp->nof_creator = current_thread();
6345 out:
6346 if (nofp && !busyerror)
6347 nfs_open_file_clear_busy(nofp);
6348 if (nfs_mount_state_in_use_end(nmp, error)) {
6349 nofp = newnofp = NULL;
6350 busyerror = 0;
6351 goto restart;
6352 }
6353 if (noop)
6354 nfs_open_owner_rele(noop);
6355 return (error);
6356 }
6357
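/*
 * The EACCES workaround in nfs4_vnop_create() deserves spelling out: a
 * create with a write-less mode (e.g. 0444) can succeed on the server
 * while the reply is lost, and the retransmitted OPEN then fails with
 * EACCES because the file now exists without write permission.  The
 * recovery is: chmod +S_IWUSR, retry the open with NFS_OPEN_NOCREATE,
 * then chmod back to the originally requested mode.
 */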
6358 /*
6359 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6360 */
6361 int
6362 nfs4_create_rpc(
6363 vfs_context_t ctx,
6364 nfsnode_t dnp,
6365 struct componentname *cnp,
6366 struct vnode_attr *vap,
6367 int type,
6368 char *link,
6369 nfsnode_t *npp)
6370 {
6371 struct nfsmount *nmp;
6372 struct nfs_vattr nvattr;
6373 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6374 int nfsvers, namedattrs, numops;
6375 u_int64_t xid, savedxid = 0;
6376 nfsnode_t np = NULL;
6377 vnode_t newvp = NULL;
6378 struct nfsm_chain nmreq, nmrep;
6379 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6380 const char *tag;
6381 nfs_specdata sd;
6382 fhandle_t fh;
6383 struct nfsreq rq, *req = &rq;
6384 struct nfs_dulookup dul;
6385 struct nfsreq_secinfo_args si;
6386
6387 nmp = NFSTONMP(dnp);
6388 if (nfs_mount_gone(nmp))
6389 return (ENXIO);
6390 nfsvers = nmp->nm_vers;
6391 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6392 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6393 return (EINVAL);
6394
6395 sd.specdata1 = sd.specdata2 = 0;
6396
6397 switch (type) {
6398 case NFLNK:
6399 tag = "symlink";
6400 break;
6401 case NFBLK:
6402 case NFCHR:
6403 tag = "mknod";
6404 if (!VATTR_IS_ACTIVE(vap, va_rdev))
6405 return (EINVAL);
6406 sd.specdata1 = major(vap->va_rdev);
6407 sd.specdata2 = minor(vap->va_rdev);
6408 break;
6409 case NFSOCK:
6410 case NFFIFO:
6411 tag = "mknod";
6412 break;
6413 case NFDIR:
6414 tag = "mkdir";
6415 break;
6416 default:
6417 return (EINVAL);
6418 }
6419
6420 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6421
6422 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6423 if (!namedattrs)
6424 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6425
6426 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6427 NVATTR_INIT(&nvattr);
6428 nfsm_chain_null(&nmreq);
6429 nfsm_chain_null(&nmrep);
6430
6431 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6432 numops = 6;
6433 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6434 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6435 numops--;
6436 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6437 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6438 numops--;
6439 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6440 numops--;
6441 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6442 nfsm_chain_add_32(error, &nmreq, type);
6443 if (type == NFLNK) {
6444 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6445 } else if ((type == NFBLK) || (type == NFCHR)) {
6446 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6447 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6448 }
6449 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6450 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6451 numops--;
6452 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6453 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6454 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6455 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6456 numops--;
6457 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6458 numops--;
6459 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6460 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6461 nfsm_chain_build_done(error, &nmreq);
6462 nfsm_assert(error, (numops == 0), EPROTO);
6463 nfsmout_if(error);
6464
6465 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6466 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6467 if (!error) {
6468 if (!namedattrs)
6469 nfs_dulookup_start(&dul, dnp, ctx);
6470 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6471 }
6472
6473 if ((lockerror = nfs_node_lock(dnp)))
6474 error = lockerror;
6475 nfsm_chain_skip_tag(error, &nmrep);
6476 nfsm_chain_get_32(error, &nmrep, numops);
6477 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6478 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6479 nfsmout_if(error);
6480 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6481 nfsm_chain_check_change_info(error, &nmrep, dnp);
6482 bmlen = NFS_ATTR_BITMAP_LEN;
6483 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6484 /* At this point if we have no error, the object was created. */
6485 /* If we don't get attributes, then we should look it up (nfs_lookitup). */
6486 create_error = error;
6487 nfsmout_if(error);
6488 nfs_vattr_set_supported(bitmap, vap);
6489 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6490 nfsmout_if(error);
6491 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6492 nfsmout_if(error);
6493 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6494 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6495 error = EBADRPC;
6496 goto nfsmout;
6497 }
6498 /* directory attributes: if we don't get them, make sure to invalidate */
6499 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6500 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6501 savedxid = xid;
6502 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
6503 if (error)
6504 NATTRINVALIDATE(dnp);
6505
6506 nfsmout:
6507 nfsm_chain_cleanup(&nmreq);
6508 nfsm_chain_cleanup(&nmrep);
6509
6510 if (!lockerror) {
6511 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
6512 dnp->n_flag &= ~NNEGNCENTRIES;
6513 cache_purge_negatives(NFSTOV(dnp));
6514 }
6515 dnp->n_flag |= NMODIFIED;
6516 nfs_node_unlock(dnp);
6517 /* nfs_getattr() will check changed and purge caches */
6518 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6519 }
6520
6521 if (!error && fh.fh_len) {
6522 /* create the vnode with the filehandle and attributes */
6523 xid = savedxid;
6524 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
6525 if (!error)
6526 newvp = NFSTOV(np);
6527 }
6528 NVATTR_CLEANUP(&nvattr);
6529
6530 if (!namedattrs)
6531 nfs_dulookup_finish(&dul, dnp, ctx);
6532
6533 /*
6534 * Kludge: map EEXIST => 0, assuming the EEXIST is the reply to a retry
6535 * of a request that already succeeded, provided we can look up the object.
6536 */
6537 if ((create_error == EEXIST) || (!create_error && !newvp)) {
6538 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6539 if (!error) {
6540 newvp = NFSTOV(np);
6541 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers))
6542 error = EEXIST;
6543 }
6544 }
6545 if (!busyerror)
6546 nfs_node_clear_busy(dnp);
6547 if (error) {
6548 if (newvp) {
6549 nfs_node_unlock(np);
6550 vnode_put(newvp);
6551 }
6552 } else {
6553 nfs_node_unlock(np);
6554 *npp = np;
6555 }
6556 return (error);
6557 }
6558
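/*
 * Tag selection in nfs4_create_rpc(), for quick reference:
 *
 *	NFLNK		-> "symlink"
 *	NFBLK/NFCHR	-> "mknod"	(va_rdev required; split into specdata1/2)
 *	NFSOCK/NFFIFO	-> "mknod"
 *	NFDIR		-> "mkdir"
 *
 * Anything else -- notably NFREG -- is rejected with EINVAL, since
 * regular files are created via the OPEN RPC (see the comment above
 * nfs4_vnop_create()).
 */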
6559 int
6560 nfs4_vnop_mknod(
6561 struct vnop_mknod_args /* {
6562 struct vnodeop_desc *a_desc;
6563 vnode_t a_dvp;
6564 vnode_t *a_vpp;
6565 struct componentname *a_cnp;
6566 struct vnode_attr *a_vap;
6567 vfs_context_t a_context;
6568 } */ *ap)
6569 {
6570 nfsnode_t np = NULL;
6571 struct nfsmount *nmp;
6572 int error;
6573
6574 nmp = VTONMP(ap->a_dvp);
6575 if (nfs_mount_gone(nmp))
6576 return (ENXIO);
6577
6578 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
6579 return (EINVAL);
6580 switch (ap->a_vap->va_type) {
6581 case VBLK:
6582 case VCHR:
6583 case VFIFO:
6584 case VSOCK:
6585 break;
6586 default:
6587 return (ENOTSUP);
6588 }
6589
6590 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6591 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
6592 if (!error)
6593 *ap->a_vpp = NFSTOV(np);
6594 return (error);
6595 }
6596
6597 int
6598 nfs4_vnop_mkdir(
6599 struct vnop_mkdir_args /* {
6600 struct vnodeop_desc *a_desc;
6601 vnode_t a_dvp;
6602 vnode_t *a_vpp;
6603 struct componentname *a_cnp;
6604 struct vnode_attr *a_vap;
6605 vfs_context_t a_context;
6606 } */ *ap)
6607 {
6608 nfsnode_t np = NULL;
6609 int error;
6610
6611 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6612 NFDIR, NULL, &np);
6613 if (!error)
6614 *ap->a_vpp = NFSTOV(np);
6615 return (error);
6616 }
6617
6618 int
6619 nfs4_vnop_symlink(
6620 struct vnop_symlink_args /* {
6621 struct vnodeop_desc *a_desc;
6622 vnode_t a_dvp;
6623 vnode_t *a_vpp;
6624 struct componentname *a_cnp;
6625 struct vnode_attr *a_vap;
6626 char *a_target;
6627 vfs_context_t a_context;
6628 } */ *ap)
6629 {
6630 nfsnode_t np = NULL;
6631 int error;
6632
6633 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
6634 NFLNK, ap->a_target, &np);
6635 if (!error)
6636 *ap->a_vpp = NFSTOV(np);
6637 return (error);
6638 }
6639
6640 int
6641 nfs4_vnop_link(
6642 struct vnop_link_args /* {
6643 struct vnodeop_desc *a_desc;
6644 vnode_t a_vp;
6645 vnode_t a_tdvp;
6646 struct componentname *a_cnp;
6647 vfs_context_t a_context;
6648 } */ *ap)
6649 {
6650 vfs_context_t ctx = ap->a_context;
6651 vnode_t vp = ap->a_vp;
6652 vnode_t tdvp = ap->a_tdvp;
6653 struct componentname *cnp = ap->a_cnp;
6654 int error = 0, lockerror = ENOENT, status;
6655 struct nfsmount *nmp;
6656 nfsnode_t np = VTONFS(vp);
6657 nfsnode_t tdnp = VTONFS(tdvp);
6658 int nfsvers, numops;
6659 u_int64_t xid, savedxid;
6660 struct nfsm_chain nmreq, nmrep;
6661 struct nfsreq_secinfo_args si;
6662
6663 if (vnode_mount(vp) != vnode_mount(tdvp))
6664 return (EXDEV);
6665
6666 nmp = VTONMP(vp);
6667 if (nfs_mount_gone(nmp))
6668 return (ENXIO);
6669 nfsvers = nmp->nm_vers;
6670 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6671 return (EINVAL);
6672 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6673 return (EINVAL);
6674
6675 /*
6676 * Push all writes to the server, so that the attribute cache
6677 * doesn't get "out of sync" with the server.
6678 * XXX There should be a better way!
6679 */
6680 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
6681
6682 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
6683 return (error);
6684
6685 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6686 nfsm_chain_null(&nmreq);
6687 nfsm_chain_null(&nmrep);
6688
6689 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
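/*
 * SAVEFH/RESTOREFH let one COMPOUND operate on two filehandles: SAVEFH
 * stashes the source file's FH before PUTFH switches the current FH to
 * the target directory for the LINK, and RESTOREFH brings the source FH
 * back so the final GETATTR refreshes the newly-linked file itself.
 */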
6690 numops = 7;
6691 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
6692 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
6693 numops--;
6694 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6695 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6696 numops--;
6697 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6698 numops--;
6699 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6700 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
6701 numops--;
6702 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
6703 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6704 numops--;
6705 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6706 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
6707 numops--;
6708 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6709 numops--;
6710 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6711 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6712 nfsm_chain_build_done(error, &nmreq);
6713 nfsm_assert(error, (numops == 0), EPROTO);
6714 nfsmout_if(error);
6715 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
6716
6717 if ((lockerror = nfs_node_lock2(tdnp, np))) {
6718 error = lockerror;
6719 goto nfsmout;
6720 }
6721 nfsm_chain_skip_tag(error, &nmrep);
6722 nfsm_chain_get_32(error, &nmrep, numops);
6723 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6724 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6725 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6726 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
6727 nfsm_chain_check_change_info(error, &nmrep, tdnp);
6728 /* directory attributes: if we don't get them, make sure to invalidate */
6729 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6730 savedxid = xid;
6731 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
6732 if (error)
6733 NATTRINVALIDATE(tdnp);
6734 /* link attributes: if we don't get them, make sure to invalidate */
6735 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
6736 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6737 xid = savedxid;
6738 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6739 if (error)
6740 NATTRINVALIDATE(np);
6741 nfsmout:
6742 nfsm_chain_cleanup(&nmreq);
6743 nfsm_chain_cleanup(&nmrep);
6744 if (!lockerror)
6745 tdnp->n_flag |= NMODIFIED;
6746 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
6747 if (error == EEXIST)
6748 error = 0;
6749 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
6750 tdnp->n_flag &= ~NNEGNCENTRIES;
6751 cache_purge_negatives(tdvp);
6752 }
6753 if (!lockerror)
6754 nfs_node_unlock2(tdnp, np);
6755 nfs_node_clear_busy2(tdnp, np);
6756 return (error);
6757 }
6758
6759 int
6760 nfs4_vnop_rmdir(
6761 struct vnop_rmdir_args /* {
6762 struct vnodeop_desc *a_desc;
6763 vnode_t a_dvp;
6764 vnode_t a_vp;
6765 struct componentname *a_cnp;
6766 vfs_context_t a_context;
6767 } */ *ap)
6768 {
6769 vfs_context_t ctx = ap->a_context;
6770 vnode_t vp = ap->a_vp;
6771 vnode_t dvp = ap->a_dvp;
6772 struct componentname *cnp = ap->a_cnp;
6773 struct nfsmount *nmp;
6774 int error = 0, namedattrs;
6775 nfsnode_t np = VTONFS(vp);
6776 nfsnode_t dnp = VTONFS(dvp);
6777 struct nfs_dulookup dul;
6778
6779 if (vnode_vtype(vp) != VDIR)
6780 return (EINVAL);
6781
6782 nmp = NFSTONMP(dnp);
6783 if (nfs_mount_gone(nmp))
6784 return (ENXIO);
6785 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6786
6787 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
6788 return (error);
6789
6790 if (!namedattrs) {
6791 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6792 nfs_dulookup_start(&dul, dnp, ctx);
6793 }
6794
6795 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
6796 vfs_context_thread(ctx), vfs_context_ucred(ctx));
6797
6798 nfs_name_cache_purge(dnp, np, cnp, ctx);
6799 /* nfs_getattr() will check changed and purge caches */
6800 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
6801 if (!namedattrs)
6802 nfs_dulookup_finish(&dul, dnp, ctx);
6803 nfs_node_clear_busy2(dnp, np);
6804
6805 /*
6806 * Kludge: Map ENOENT => 0, assuming the ENOENT is the reply to a retried request.
6807 */
6808 if (error == ENOENT)
6809 error = 0;
6810 if (!error) {
6811 /*
6812 * remove nfsnode from hash now so we can't accidentally find it
6813 * again if another object gets created with the same filehandle
6814 * before this vnode gets reclaimed
6815 */
6816 lck_mtx_lock(nfs_node_hash_mutex);
6817 if (np->n_hflag & NHHASHED) {
6818 LIST_REMOVE(np, n_hash);
6819 np->n_hflag &= ~NHHASHED;
6820 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
6821 }
6822 lck_mtx_unlock(nfs_node_hash_mutex);
6823 }
6824 return (error);
6825 }
6826
6827 /*
6828 * NFSv4 Named Attributes
6829 *
6830 * Both the extended attributes interface and the named streams interface
6831 * are backed by NFSv4 named attributes. The implementations for both use
6832 * a common set of routines in an attempt to reduce code duplication, to
6833 * increase efficiency, to increase caching of both names and data, and to
6834 * confine the complexity.
6835 *
6836 * Each NFS node caches its named attribute directory's file handle.
6837 * The directory nodes for the named attribute directories are handled
6838 * exactly like regular directories (with a couple minor exceptions).
6839 * Named attribute nodes are also treated as much like regular files as
6840 * possible.
6841 *
6842 * Most of the heavy lifting is done by nfs4_named_attr_get().
6843 */
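/*
 * For illustration only: both interfaces described above are reached from
 * userspace through the standard xattr syscalls, so a named attribute
 * stored by this code is visible to setxattr(2)/getxattr(2).  A minimal
 * sketch follows; the path and attribute name are hypothetical examples,
 * not values used anywhere in this file.
 */
#if 0	/* illustrative userspace sketch -- not compiled */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/Volumes/nfs4vol/file";	/* hypothetical NFSv4 file */
	const char *val = "hello";

	/* stored server-side as an NFSv4 named attribute */
	if (setxattr(path, "user.test", val, strlen(val), 0, 0) != 0)
		perror("setxattr");

	char buf[64];
	ssize_t len = getxattr(path, "user.test", buf, sizeof(buf), 0, 0);
	if (len >= 0)
		printf("user.test = %.*s\n", (int)len, buf);
	return (0);
}
#endif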
6844
6845 /*
6846 * Get the given node's attribute directory node.
6847 * If !fetch, then only return a cached node.
6848 * Otherwise, we will attempt to fetch the node from the server.
6849 * (Note: the node should be marked busy.)
6850 */
6851 nfsnode_t
6852 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
6853 {
6854 nfsnode_t adnp = NULL;
6855 struct nfsmount *nmp;
6856 int error = 0, status, numops;
6857 struct nfsm_chain nmreq, nmrep;
6858 u_int64_t xid;
6859 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
6860 fhandle_t fh;
6861 struct nfs_vattr nvattr;
6862 struct componentname cn;
6863 struct nfsreq rq, *req = &rq;
6864 struct nfsreq_secinfo_args si;
6865
6866 nmp = NFSTONMP(np);
6867 if (nfs_mount_gone(nmp))
6868 return (NULL);
6869 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)
6870 return (NULL);
6871
6872 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6873 NVATTR_INIT(&nvattr);
6874 nfsm_chain_null(&nmreq);
6875 nfsm_chain_null(&nmrep);
6876
6877 bzero(&cn, sizeof(cn));
6878 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
6879 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
6880 cn.cn_nameiop = LOOKUP;
6881
6882 if (np->n_attrdirfh) {
6883 // XXX can't set parent correctly (to np) yet
6884 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh,
6885 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
6886 if (adnp)
6887 goto nfsmout;
6888 }
6889 if (!fetch) {
6890 error = ENOENT;
6891 goto nfsmout;
6892 }
6893
6894 // PUTFH, OPENATTR, GETATTR
6895 numops = 3;
6896 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
6897 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
6898 numops--;
6899 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6900 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
6901 numops--;
6902 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
6903 nfsm_chain_add_32(error, &nmreq, 0);
6904 numops--;
6905 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6906 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6907 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6908 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
6909 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
6910 nfsm_chain_build_done(error, &nmreq);
6911 nfsm_assert(error, (numops == 0), EPROTO);
6912 nfsmout_if(error);
6913 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
6914 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6915 if (!error)
6916 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6917
6918 nfsm_chain_skip_tag(error, &nmrep);
6919 nfsm_chain_get_32(error, &nmrep, numops);
6920 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6921 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
6922 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6923 nfsmout_if(error);
6924 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6925 nfsmout_if(error);
6926 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
6927 error = ENOENT;
6928 goto nfsmout;
6929 }
6930 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
6931 /* (re)allocate attrdir fh buffer */
6932 if (np->n_attrdirfh)
6933 FREE(np->n_attrdirfh, M_TEMP);
6934 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
6935 }
6936 if (!np->n_attrdirfh) {
6937 error = ENOMEM;
6938 goto nfsmout;
6939 }
6940 /* cache the attrdir fh in the node */
6941 *np->n_attrdirfh = fh.fh_len;
6942 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
6943 /* create node for attrdir */
6944 // XXX can't set parent correctly (to np) yet
6945 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
6946 nfsmout:
6947 NVATTR_CLEANUP(&nvattr);
6948 nfsm_chain_cleanup(&nmreq);
6949 nfsm_chain_cleanup(&nmrep);
6950
6951 if (adnp) {
6952 /* sanity check that this node is an attribute directory */
6953 if (adnp->n_vattr.nva_type != VDIR)
6954 error = EINVAL;
6955 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR))
6956 error = EINVAL;
6957 nfs_node_unlock(adnp);
6958 if (error)
6959 vnode_put(NFSTOV(adnp));
6960 }
6961 return (error ? NULL : adnp);
6962 }
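/*
 * A minimal sketch of the expected calling pattern, mirroring the callers
 * below (nfs4_named_attr_remove(), nfs4_vnop_listxattr()): the node is
 * marked busy around the call, and any returned attrdir node carries a
 * vnode reference the caller must drop with vnode_put().
 */
#if 0	/* illustrative fragment -- not compiled */
	nfsnode_t adnp;
	int error;

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
		return (error);
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);	/* 1 => fetch from server if not cached */
	nfs_node_clear_busy(np);
	if (adnp) {
		/* ... use the attribute directory node ... */
		vnode_put(NFSTOV(adnp));
	}
#endif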
6963
6964 /*
6965 * Get the given node's named attribute node for the name given.
6966 *
6967 * In an effort to increase the performance of named attribute access, we try
6968 * to reduce server requests by doing the following:
6969 *
6970 * - cache the node's named attribute directory file handle in the node
6971 * - maintain a directory vnode for the attribute directory
6972 * - use name cache entries (positive and negative) to speed up lookups
6973 * - optionally open the named attribute (with the given accessMode) in the same RPC
6974 * - combine attribute directory retrieval with the lookup/open RPC
6975 * - optionally prefetch the named attribute's first block of data in the same RPC
6976 *
6977 * Also, in an attempt to reduce the number of copies/variations of this code,
6978 * parts of the RPC building/processing code are conditionalized on what is
6979 * needed for any particular request (openattr, lookup vs. open, read).
6980 *
6981 * Note that because we may not have the attribute directory node when we start
6982 * the lookup/open, we lock both the node and the attribute directory node.
6983 */
6984
6985 #define NFS_GET_NAMED_ATTR_CREATE 0x1
6986 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
6987 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
6988 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
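/*
 * For example, nfs4_vnop_setxattr() below maps its xattr options onto
 * these flags: no XATTR_REPLACE => CREATE, XATTR_CREATE => CREATE_GUARDED,
 * and any attribute other than the resource fork also gets TRUNCATE so
 * stale data is discarded on open.
 */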
6989
6990 int
6991 nfs4_named_attr_get(
6992 nfsnode_t np,
6993 struct componentname *cnp,
6994 uint32_t accessMode,
6995 int flags,
6996 vfs_context_t ctx,
6997 nfsnode_t *anpp,
6998 struct nfs_open_file **nofpp)
6999 {
7000 struct nfsmount *nmp;
7001 int error = 0, open_error = EIO;
7002 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7003 int create, guarded, prefetch, truncate, noopbusy = 0;
7004 int open, status, numops, hadattrdir, negnamecache;
7005 struct nfs_vattr nvattr;
7006 struct vnode_attr vattr;
7007 nfsnode_t adnp = NULL, anp = NULL;
7008 vnode_t avp = NULL;
7009 u_int64_t xid, savedxid = 0;
7010 struct nfsm_chain nmreq, nmrep;
7011 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7012 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7013 nfs_stateid stateid, dstateid;
7014 fhandle_t fh;
7015 struct nfs_open_owner *noop = NULL;
7016 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7017 struct vnop_access_args naa;
7018 thread_t thd;
7019 kauth_cred_t cred;
7020 struct timeval now;
7021 char sbuf[64], *s;
7022 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7023 struct kauth_ace ace;
7024 struct nfsreq rq, *req = &rq;
7025 struct nfsreq_secinfo_args si;
7026
7027 *anpp = NULL;
7028 fh.fh_len = 0;
7029 rflags = delegation = recall = eof = rlen = retlen = 0;
7030 ace.ace_flags = 0;
7031 s = sbuf;
7032 slen = sizeof(sbuf);
7033
7034 nmp = NFSTONMP(np);
7035 if (nfs_mount_gone(nmp))
7036 return (ENXIO);
7037 NVATTR_INIT(&nvattr);
7038 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7039 thd = vfs_context_thread(ctx);
7040 cred = vfs_context_ucred(ctx);
7041 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7042 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7043 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7044 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7045
7046 if (!create) {
7047 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7048 if (error)
7049 return (error);
7050 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7051 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7052 return (ENOATTR);
7053 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7054 /* shouldn't happen... but just be safe */
7055 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7056 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7057 }
7058 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7059 if (open) {
7060 /*
7061 * We're trying to open the file.
7062 * We'll create/open it with the given access mode,
7063 * and set NFS_OPEN_FILE_CREATE.
7064 */
7065 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7066 if (prefetch && guarded)
7067 prefetch = 0; /* no sense prefetching data that can't be there */
7068
7069 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7070 if (!noop)
7071 return (ENOMEM);
7072 }
7073
7074 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7075 return (error);
7076
7077 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7078 hadattrdir = (adnp != NULL);
7079 if (prefetch) {
7080 microuptime(&now);
7081 /* use the special state ID because we don't have a real one to send */
7082 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7083 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7084 }
7085 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7086 nfsm_chain_null(&nmreq);
7087 nfsm_chain_null(&nmrep);
7088
7089 if (hadattrdir) {
7090 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx))))
7091 goto nfsmout;
7092 /* nfs_getattr() will check changed and purge caches */
7093 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7094 nfsmout_if(error);
7095 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7096 switch (error) {
7097 case ENOENT:
7098 /* negative cache entry */
7099 goto nfsmout;
7100 case 0:
7101 /* cache miss */
7102 /* try dir buf cache lookup */
7103 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7104 if (!error && anp) {
7105 /* dir buf cache hit */
7106 *anpp = anp;
7107 error = -1;
7108 }
7109 if (error != -1) /* cache miss */
7110 break;
7111 /* FALLTHROUGH */
7112 case -1:
7113 /* cache hit, not really an error */
7114 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7115 if (!anp && avp)
7116 *anpp = anp = VTONFS(avp);
7117
7118 nfs_node_clear_busy(adnp);
7119 adbusyerror = ENOENT;
7120
7121 /* check for directory access */
7122 naa.a_desc = &vnop_access_desc;
7123 naa.a_vp = NFSTOV(adnp);
7124 naa.a_action = KAUTH_VNODE_SEARCH;
7125 naa.a_context = ctx;
7126
7127 /* compute actual success/failure based on accessibility */
7128 error = nfs_vnop_access(&naa);
7129 /* FALLTHROUGH */
7130 default:
7131 /* we either found it, or hit an error */
7132 if (!error && guarded) {
7133 /* found cached entry but told not to use it */
7134 error = EEXIST;
7135 vnode_put(NFSTOV(anp));
7136 *anpp = anp = NULL;
7137 }
7138 /* we're done if error or we don't need to open */
7139 if (error || !open)
7140 goto nfsmout;
7141 /* no error and we need to open... */
7142 }
7143 }
7144
7145 if (open) {
7146 restart:
7147 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7148 if (error) {
7149 nfs_open_owner_rele(noop);
7150 noop = NULL;
7151 goto nfsmout;
7152 }
7153 inuse = 1;
7154
7155 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7156 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7157 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7158 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7159 error = EIO;
7160 }
7161 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7162 nfs_mount_state_in_use_end(nmp, 0);
7163 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7164 nfs_open_file_destroy(newnofp);
7165 newnofp = NULL;
7166 if (!error)
7167 goto restart;
7168 }
7169 if (!error)
7170 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7171 if (error) {
7172 if (newnofp)
7173 nfs_open_file_destroy(newnofp);
7174 newnofp = NULL;
7175 goto nfsmout;
7176 }
7177 if (anp) {
7178 /*
7179 * We already have the node. So we just need to open
7180 * it - which we may be able to do with a delegation.
7181 */
7182 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7183 if (!error) {
7184 /* open succeeded, so our open file is no longer temporary */
7185 nofp = newnofp;
7186 nofpbusyerror = 0;
7187 newnofp = NULL;
7188 if (nofpp)
7189 *nofpp = nofp;
7190 }
7191 goto nfsmout;
7192 }
7193 }
7194
7195 /*
7196 * We either don't have the attrdir or we didn't find the attribute
7197 * in the name cache, so we need to talk to the server.
7198 *
7199 * If we don't have the attrdir, we'll need to ask the server for that too.
7200 * If the caller is requesting that the attribute be created, we need to
7201 * make sure the attrdir is created.
7202 * The caller may also request that the first block of an existing attribute
7203 * be retrieved at the same time.
7204 */
7205
7206 if (open) {
7207 /* need to mark the open owner busy during the RPC */
7208 if ((error = nfs_open_owner_set_busy(noop, thd)))
7209 goto nfsmout;
7210 noopbusy = 1;
7211 }
7212
7213 /*
7214 * We'd like to get updated post-open/lookup attributes for the
7215 * directory and we may also want to prefetch some data via READ.
7216 * We'd like the READ results to be last so that we can leave the
7217 * data in the mbufs until the end.
7218 *
7219 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7220 */
7221 numops = 5;
7222 if (!hadattrdir)
7223 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7224 if (prefetch)
7225 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
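/*
 * Worst case (!hadattrdir && prefetch) the COMPOUND is: PUTFH, OPENATTR,
 * GETATTR, OPEN/LOOKUP, GETATTR, SAVEFH, PUTFH, OPENATTR, GETATTR,
 * RESTOREFH, NVERIFY, READ -- twelve ops, built in that order below.
 */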
7226 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7227 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7228 if (hadattrdir) {
7229 numops--;
7230 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7231 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7232 } else {
7233 numops--;
7234 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7235 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7236 numops--;
7237 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7238 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7239 numops--;
7240 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7241 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7242 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7243 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7244 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7245 }
7246 if (open) {
7247 numops--;
7248 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7249 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7250 nfsm_chain_add_32(error, &nmreq, accessMode);
7251 nfsm_chain_add_32(error, &nmreq, denyMode);
7252 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7253 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7254 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7255 nfsm_chain_add_32(error, &nmreq, create);
7256 if (create) {
7257 nfsm_chain_add_32(error, &nmreq, guarded);
7258 VATTR_INIT(&vattr);
7259 if (truncate)
7260 VATTR_SET(&vattr, va_data_size, 0);
7261 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7262 }
7263 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7264 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7265 } else {
7266 numops--;
7267 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7268 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7269 }
7270 numops--;
7271 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7272 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7273 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7274 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7275 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7276 if (prefetch) {
7277 numops--;
7278 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7279 }
7280 if (hadattrdir) {
7281 numops--;
7282 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7283 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7284 } else {
7285 numops--;
7286 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7287 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7288 numops--;
7289 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7290 nfsm_chain_add_32(error, &nmreq, 0);
7291 }
7292 numops--;
7293 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7294 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7295 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7296 if (prefetch) {
7297 numops--;
7298 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7299 numops--;
7300 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7301 VATTR_INIT(&vattr);
7302 VATTR_SET(&vattr, va_data_size, 0);
7303 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7304 numops--;
7305 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7306 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7307 nfsm_chain_add_64(error, &nmreq, 0);
7308 nfsm_chain_add_32(error, &nmreq, rlen);
7309 }
7310 nfsm_chain_build_done(error, &nmreq);
7311 nfsm_assert(error, (numops == 0), EPROTO);
7312 nfsmout_if(error);
7313 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7314 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7315 if (!error)
7316 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7317
7318 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp))))
7319 error = adlockerror;
7320 savedxid = xid;
7321 nfsm_chain_skip_tag(error, &nmrep);
7322 nfsm_chain_get_32(error, &nmrep, numops);
7323 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7324 if (!hadattrdir) {
7325 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7326 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7327 nfsmout_if(error);
7328 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7329 nfsmout_if(error);
7330 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7331 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7332 /* (re)allocate attrdir fh buffer */
7333 if (np->n_attrdirfh)
7334 FREE(np->n_attrdirfh, M_TEMP);
7335 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK);
7336 }
7337 if (np->n_attrdirfh) {
7338 /* remember the attrdir fh in the node */
7339 *np->n_attrdirfh = fh.fh_len;
7340 bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len);
7341 /* create busied node for attrdir */
7342 struct componentname cn;
7343 bzero(&cn, sizeof(cn));
7344 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7345 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7346 cn.cn_nameiop = LOOKUP;
7347 // XXX can't set parent correctly (to np) yet
7348 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7349 if (!error) {
7350 adlockerror = 0;
7351 /* set the node busy */
7352 SET(adnp->n_flag, NBUSY);
7353 adbusyerror = 0;
7354 }
7355 /* if no adnp, oh well... */
7356 error = 0;
7357 }
7358 }
7359 NVATTR_CLEANUP(&nvattr);
7360 fh.fh_len = 0;
7361 }
7362 if (open) {
7363 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7364 nfs_owner_seqid_increment(noop, NULL, error);
7365 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7366 nfsm_chain_check_change_info(error, &nmrep, adnp);
7367 nfsm_chain_get_32(error, &nmrep, rflags);
7368 bmlen = NFS_ATTR_BITMAP_LEN;
7369 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7370 nfsm_chain_get_32(error, &nmrep, delegation);
7371 if (!error)
7372 switch (delegation) {
7373 case NFS_OPEN_DELEGATE_NONE:
7374 break;
7375 case NFS_OPEN_DELEGATE_READ:
7376 case NFS_OPEN_DELEGATE_WRITE:
7377 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7378 nfsm_chain_get_32(error, &nmrep, recall);
7379 if (delegation == NFS_OPEN_DELEGATE_WRITE) // skip the write delegation's space limit XXX
7380 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7381 /* if we have any trouble accepting the ACE, just invalidate it */
7382 ace_type = ace_flags = ace_mask = len = 0;
7383 nfsm_chain_get_32(error, &nmrep, ace_type);
7384 nfsm_chain_get_32(error, &nmrep, ace_flags);
7385 nfsm_chain_get_32(error, &nmrep, ace_mask);
7386 nfsm_chain_get_32(error, &nmrep, len);
7387 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7388 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7389 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7390 if (!error && (len >= slen)) {
7391 MALLOC(s, char*, len+1, M_TEMP, M_WAITOK);
7392 if (s)
7393 slen = len+1;
7394 else
7395 ace.ace_flags = 0;
7396 }
7397 if (s)
7398 nfsm_chain_get_opaque(error, &nmrep, len, s);
7399 else
7400 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7401 if (!error && s) {
7402 s[len] = '\0';
7403 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP)))
7404 ace.ace_flags = 0;
7405 }
7406 if (error || !s)
7407 ace.ace_flags = 0;
7408 if (s && (s != sbuf))
7409 FREE(s, M_TEMP);
7410 break;
7411 default:
7412 error = EBADRPC;
7413 break;
7414 }
7415 /* At this point if we have no error, the object was created/opened. */
7416 open_error = error;
7417 } else {
7418 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7419 }
7420 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7421 nfsmout_if(error);
7422 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7423 nfsmout_if(error);
7424 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7425 error = EIO;
7426 goto nfsmout;
7427 }
7428 if (prefetch)
7429 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7430 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7431 if (!hadattrdir)
7432 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7433 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7434 nfsmout_if(error);
7435 xid = savedxid;
7436 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
7437 nfsmout_if(error);
7438
7439 if (open) {
7440 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
7441 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7442 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
7443 if (adnp) {
7444 nfs_node_unlock(adnp);
7445 adlockerror = ENOENT;
7446 }
7447 NVATTR_CLEANUP(&nvattr);
7448 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
7449 nfsmout_if(error);
7450 savedxid = xid;
7451 if ((adlockerror = nfs_node_lock(adnp)))
7452 error = adlockerror;
7453 }
7454 }
7455
7456 nfsmout:
7457 if (open && adnp && !adlockerror) {
7458 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
7459 adnp->n_flag &= ~NNEGNCENTRIES;
7460 cache_purge_negatives(NFSTOV(adnp));
7461 }
7462 adnp->n_flag |= NMODIFIED;
7463 nfs_node_unlock(adnp);
7464 adlockerror = ENOENT;
7465 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7466 }
7467 if (adnp && !adlockerror && (error == ENOENT) &&
7468 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
7469 /* add a negative entry in the name cache */
7470 cache_enter(NFSTOV(adnp), NULL, cnp);
7471 adnp->n_flag |= NNEGNCENTRIES;
7472 }
7473 if (adnp && !adlockerror) {
7474 nfs_node_unlock(adnp);
7475 adlockerror = ENOENT;
7476 }
7477 if (!error && !anp && fh.fh_len) {
7478 /* create the vnode with the filehandle and attributes */
7479 xid = savedxid;
7480 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
7481 if (!error) {
7482 *anpp = anp;
7483 nfs_node_unlock(anp);
7484 }
7485 if (!error && open) {
7486 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
7487 /* After we have a node, add our open file struct to the node */
7488 nofp = newnofp;
7489 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
7490 if (error) {
7491 /* This shouldn't happen, because we passed in a new nofp to use. */
7492 printf("nfs_open_file_find_internal failed! %d\n", error);
7493 nofp = NULL;
7494 } else if (nofp != newnofp) {
7495 /*
7496 * Hmm... an open file struct already exists.
7497 * Mark the existing one busy and merge our open into it.
7498 * Then destroy the one we created.
7499 * Note: there's no chance of an open conflict because the
7500 * open has already been granted.
7501 */
7502 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
7503 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
7504 nofp->nof_stateid = newnofp->nof_stateid;
7505 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)
7506 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
7507 nfs_open_file_clear_busy(newnofp);
7508 nfs_open_file_destroy(newnofp);
7509 newnofp = NULL;
7510 }
7511 if (!error) {
7512 newnofp = NULL;
7513 nofpbusyerror = 0;
7514 /* mark the node as holding a create-initiated open */
7515 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
7516 nofp->nof_creator = current_thread();
7517 if (nofpp)
7518 *nofpp = nofp;
7519 }
7520 }
7521 }
7522 NVATTR_CLEANUP(&nvattr);
7523 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
7524 if (!error && anp && !recall) {
7525 /* stuff the delegation state in the node */
7526 lck_mtx_lock(&anp->n_openlock);
7527 anp->n_openflags &= ~N_DELEG_MASK;
7528 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7529 anp->n_dstateid = dstateid;
7530 anp->n_dace = ace;
7531 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7532 lck_mtx_lock(&nmp->nm_lock);
7533 if (anp->n_dlink.tqe_next == NFSNOLIST)
7534 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7535 lck_mtx_unlock(&nmp->nm_lock);
7536 }
7537 lck_mtx_unlock(&anp->n_openlock);
7538 } else {
7539 /* give the delegation back */
7540 if (anp) {
7541 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
7542 /* update delegation state and return it */
7543 lck_mtx_lock(&anp->n_openlock);
7544 anp->n_openflags &= ~N_DELEG_MASK;
7545 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
7546 anp->n_dstateid = dstateid;
7547 anp->n_dace = ace;
7548 if (anp->n_dlink.tqe_next == NFSNOLIST) {
7549 lck_mtx_lock(&nmp->nm_lock);
7550 if (anp->n_dlink.tqe_next == NFSNOLIST)
7551 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
7552 lck_mtx_unlock(&nmp->nm_lock);
7553 }
7554 lck_mtx_unlock(&anp->n_openlock);
7555 /* don't need to send a separate delegreturn for fh */
7556 fh.fh_len = 0;
7557 }
7558 /* return anp's current delegation */
7559 nfs4_delegation_return(anp, 0, thd, cred);
7560 }
7561 if (fh.fh_len) /* return fh's delegation if it wasn't for anp */
7562 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
7563 }
7564 }
7565 if (open) {
7566 if (newnofp) {
7567 /* need to cleanup our temporary nofp */
7568 nfs_open_file_clear_busy(newnofp);
7569 nfs_open_file_destroy(newnofp);
7570 newnofp = NULL;
7571 } else if (nofp && !nofpbusyerror) {
7572 nfs_open_file_clear_busy(nofp);
7573 nofpbusyerror = ENOENT;
7574 }
7575 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
7576 inuse = 0;
7577 nofp = newnofp = NULL;
7578 rflags = delegation = recall = eof = rlen = retlen = 0;
7579 ace.ace_flags = 0;
7580 s = sbuf;
7581 slen = sizeof(sbuf);
7582 nfsm_chain_cleanup(&nmreq);
7583 nfsm_chain_cleanup(&nmrep);
7584 if (anp) {
7585 vnode_put(NFSTOV(anp));
7586 *anpp = anp = NULL;
7587 }
7588 hadattrdir = (adnp != NULL);
7589 if (noopbusy) {
7590 nfs_open_owner_clear_busy(noop);
7591 noopbusy = 0;
7592 }
7593 goto restart;
7594 }
7595 if (noop) {
7596 if (noopbusy) {
7597 nfs_open_owner_clear_busy(noop);
7598 noopbusy = 0;
7599 }
7600 nfs_open_owner_rele(noop);
7601 }
7602 }
7603 if (!error && prefetch && nmrep.nmc_mhead) {
7604 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7605 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
7606 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
7607 nfsm_chain_get_32(error, &nmrep, eof);
7608 nfsm_chain_get_32(error, &nmrep, retlen);
7609 if (!error && anp) {
7610 /*
7611 * There is one potential problem with doing the prefetch.
7612 * Because we don't have the node before we start the RPC, we
7613 * can't have the buffer busy while the READ is performed.
7614 * So there is a chance that other I/O occurred on the same
7615 * range of data while we were performing this RPC. If that
7616 * happens, then it's possible the data we have in the READ
7617 * response is no longer up to date.
7618 * Once we have the node and the buffer, we need to make sure
7619 * that there's no chance we could be putting stale data in
7620 * the buffer.
7621 * So, we check if the range read is dirty or if any I/O may
7622 * have occurred on it while we were performing our RPC.
7623 */
7624 struct nfsbuf *bp = NULL;
7625 int lastpg;
7626 uint32_t pagemask;
7627
7628 retlen = MIN(retlen, rlen);
7629
7630 /* check if node needs size update or invalidation */
7631 if (ISSET(anp->n_flag, NUPDATESIZE))
7632 nfs_data_update_size(anp, 0);
7633 if (!(error = nfs_node_lock(anp))) {
7634 if (anp->n_flag & NNEEDINVALIDATE) {
7635 anp->n_flag &= ~NNEEDINVALIDATE;
7636 nfs_node_unlock(anp);
7637 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1);
7638 if (!error) /* let's play it safe and just drop the data */
7639 error = EIO;
7640 } else {
7641 nfs_node_unlock(anp);
7642 }
7643 }
7644
7645 /* calculate page mask for the range of data read */
7646 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
7647 pagemask = ((1 << (lastpg + 1)) - 1);
7648
7649 if (!error)
7650 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp);
7651 /* don't save the data if dirty or potential I/O conflict */
7652 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
7653 timevalcmp(&anp->n_lastio, &now, <)) {
7654 OSAddAtomic64(1, &nfsstats.read_bios);
7655 CLR(bp->nb_flags, (NB_DONE|NB_ASYNC));
7656 SET(bp->nb_flags, NB_READ);
7657 NFS_BUF_MAP(bp);
7658 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
7659 if (error) {
7660 bp->nb_error = error;
7661 SET(bp->nb_flags, NB_ERROR);
7662 } else {
7663 bp->nb_offio = 0;
7664 bp->nb_endio = rlen;
7665 if ((retlen > 0) && (bp->nb_endio < (int)retlen))
7666 bp->nb_endio = retlen;
7667 if (eof || (retlen == 0)) {
7668 /* zero out the remaining data (up to EOF) */
7669 off_t rpcrem, eofrem, rem;
7670 rpcrem = (rlen - retlen);
7671 eofrem = anp->n_size - (NBOFF(bp) + retlen);
7672 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
7673 if (rem > 0)
7674 bzero(bp->nb_data + retlen, rem);
7675 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
7676 /* ugh... short read ... just invalidate for now... */
7677 SET(bp->nb_flags, NB_INVAL);
7678 }
7679 }
7680 nfs_buf_read_finish(bp);
7681 microuptime(&anp->n_lastio);
7682 }
7683 if (bp)
7684 nfs_buf_release(bp, 1);
7685 }
7686 error = 0; /* ignore any transient error in processing the prefetch */
7687 }
7688 if (adnp && !adbusyerror) {
7689 nfs_node_clear_busy(adnp);
7690 adbusyerror = ENOENT;
7691 }
7692 if (!busyerror) {
7693 nfs_node_clear_busy(np);
7694 busyerror = ENOENT;
7695 }
7696 if (adnp)
7697 vnode_put(NFSTOV(adnp));
7698 if (error && *anpp) {
7699 vnode_put(NFSTOV(*anpp));
7700 *anpp = NULL;
7701 }
7702 nfsm_chain_cleanup(&nmreq);
7703 nfsm_chain_cleanup(&nmrep);
7704 return (error);
7705 }
7706
7707 /*
7708 * Remove a named attribute.
7709 */
7710 int
7711 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
7712 {
7713 nfsnode_t adnp = NULL;
7714 struct nfsmount *nmp;
7715 struct componentname cn;
7716 struct vnop_remove_args vra;
7717 int error, putanp = 0;
7718
7719 nmp = NFSTONMP(np);
7720 if (nfs_mount_gone(nmp))
7721 return (ENXIO);
7722
7723 bzero(&cn, sizeof(cn));
7724 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7725 cn.cn_namelen = strlen(name);
7726 cn.cn_nameiop = DELETE;
7727 cn.cn_flags = 0;
7728
7729 if (!anp) {
7730 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7731 0, ctx, &anp, NULL);
7732 if ((!error && !anp) || (error == ENOATTR))
7733 error = ENOENT;
7734 if (error) {
7735 if (anp) {
7736 vnode_put(NFSTOV(anp));
7737 anp = NULL;
7738 }
7739 goto out;
7740 }
7741 putanp = 1;
7742 }
7743
7744 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
7745 goto out;
7746 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
7747 nfs_node_clear_busy(np);
7748 if (!adnp) {
7749 error = ENOENT;
7750 goto out;
7751 }
7752
7753 vra.a_desc = &vnop_remove_desc;
7754 vra.a_dvp = NFSTOV(adnp);
7755 vra.a_vp = NFSTOV(anp);
7756 vra.a_cnp = &cn;
7757 vra.a_flags = 0;
7758 vra.a_context = ctx;
7759 error = nfs_vnop_remove(&vra);
7760 out:
7761 if (adnp)
7762 vnode_put(NFSTOV(adnp));
7763 if (putanp)
7764 vnode_put(NFSTOV(anp));
7765 return (error);
7766 }
7767
7768 int
7769 nfs4_vnop_getxattr(
7770 struct vnop_getxattr_args /* {
7771 struct vnodeop_desc *a_desc;
7772 vnode_t a_vp;
7773 const char * a_name;
7774 uio_t a_uio;
7775 size_t *a_size;
7776 int a_options;
7777 vfs_context_t a_context;
7778 } */ *ap)
7779 {
7780 vfs_context_t ctx = ap->a_context;
7781 struct nfsmount *nmp;
7782 struct nfs_vattr nvattr;
7783 struct componentname cn;
7784 nfsnode_t anp;
7785 int error = 0, isrsrcfork;
7786
7787 nmp = VTONMP(ap->a_vp);
7788 if (nfs_mount_gone(nmp))
7789 return (ENXIO);
7790
7791 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7792 return (ENOTSUP);
7793 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
7794 if (error)
7795 return (error);
7796 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7797 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
7798 return (ENOATTR);
7799
7800 bzero(&cn, sizeof(cn));
7801 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
7802 cn.cn_namelen = strlen(ap->a_name);
7803 cn.cn_nameiop = LOOKUP;
7804 cn.cn_flags = MAKEENTRY;
7805
7806 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
7807 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7808
7809 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
7810 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
7811 if ((!error && !anp) || (error == ENOENT))
7812 error = ENOATTR;
7813 if (!error) {
7814 if (ap->a_uio)
7815 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
7816 else
7817 *ap->a_size = anp->n_size;
7818 }
7819 if (anp)
7820 vnode_put(NFSTOV(anp));
7821 return (error);
7822 }
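/*
 * The a_uio == NULL case above services size-only queries, which is how
 * userspace typically sizes a buffer before reading.  A minimal consumer
 * sketch (the helper name and path handling are hypothetical):
 */
#if 0	/* illustrative userspace sketch -- not compiled */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdlib.h>

static void *
read_xattr(const char *path, const char *name, ssize_t *lenp)
{
	/* first call: NULL buffer => just report the current size */
	ssize_t len = getxattr(path, name, NULL, 0, 0, 0);
	if (len < 0)
		return (NULL);
	void *buf = malloc(len ? len : 1);
	if (buf == NULL)
		return (NULL);
	/* second call: fetch the data (can race with concurrent updates) */
	len = getxattr(path, name, buf, len, 0, 0);
	if (len < 0) {
		free(buf);
		return (NULL);
	}
	*lenp = len;
	return (buf);
}
#endif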
7823
7824 int
7825 nfs4_vnop_setxattr(
7826 struct vnop_setxattr_args /* {
7827 struct vnodeop_desc *a_desc;
7828 vnode_t a_vp;
7829 const char * a_name;
7830 uio_t a_uio;
7831 int a_options;
7832 vfs_context_t a_context;
7833 } */ *ap)
7834 {
7835 vfs_context_t ctx = ap->a_context;
7836 int options = ap->a_options;
7837 uio_t uio = ap->a_uio;
7838 const char *name = ap->a_name;
7839 struct nfsmount *nmp;
7840 struct componentname cn;
7841 nfsnode_t anp = NULL;
7842 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
7843 #define FINDERINFOSIZE 32
7844 uint8_t finfo[FINDERINFOSIZE];
7845 uint32_t *finfop;
7846 struct nfs_open_file *nofp = NULL;
7847 char uio_buf [ UIO_SIZEOF(1) ];
7848 uio_t auio;
7849 struct vnop_write_args vwa;
7850
7851 nmp = VTONMP(ap->a_vp);
7852 if (nfs_mount_gone(nmp))
7853 return (ENXIO);
7854
7855 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7856 return (ENOTSUP);
7857
7858 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE))
7859 return (EINVAL);
7860
7861 /* XXX limitation based on need to back up uio on short write */
7862 if (uio_iovcnt(uio) > 1) {
7863 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
7864 return (EINVAL);
7865 }
7866
7867 bzero(&cn, sizeof(cn));
7868 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
7869 cn.cn_namelen = strlen(name);
7870 cn.cn_nameiop = CREATE;
7871 cn.cn_flags = MAKEENTRY;
7872
7873 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
7874 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
7875 if (!isrsrcfork)
7876 uio_setoffset(uio, 0);
7877 if (isfinderinfo) {
7878 if (uio_resid(uio) != sizeof(finfo))
7879 return (ERANGE);
7880 error = uiomove((char*)&finfo, sizeof(finfo), uio);
7881 if (error)
7882 return (error);
7883 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
7884 empty = 1;
7885 for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++)
7886 if (finfop[i]) {
7887 empty = 0;
7888 break;
7889 }
7890 if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) {
7891 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7892 if (error == ENOENT)
7893 error = 0;
7894 return (error);
7895 }
7896 /* first, let's see if we get a create/replace error */
7897 }
7898
7899 /*
7900 * create/open the xattr
7901 *
7902 * We need to make sure not to create it if XATTR_REPLACE.
7903 * For all xattrs except the resource fork, we also want to
7904 * truncate the xattr to remove any current data. We'll do
7905 * that by setting the size to 0 on create/open.
7906 */
7907 flags = 0;
7908 if (!(options & XATTR_REPLACE))
7909 flags |= NFS_GET_NAMED_ATTR_CREATE;
7910 if (options & XATTR_CREATE)
7911 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
7912 if (!isrsrcfork)
7913 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
7914
7915 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
7916 flags, ctx, &anp, &nofp);
7917 if (!error && !anp)
7918 error = ENOATTR;
7919 if (error)
7920 goto out;
7921 /* grab the open state from the get/create/open */
7922 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
7923 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
7924 nofp->nof_creator = NULL;
7925 nfs_open_file_clear_busy(nofp);
7926 }
7927
7928 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
7929 if (isfinderinfo && empty)
7930 goto doclose;
7931
7932 /*
7933 * Write the data out and flush.
7934 *
7935 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
7936 */
7937 vwa.a_desc = &vnop_write_desc;
7938 vwa.a_vp = NFSTOV(anp);
7939 vwa.a_uio = NULL;
7940 vwa.a_ioflag = 0;
7941 vwa.a_context = ctx;
7942 if (isfinderinfo) {
7943 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
7944 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
7945 vwa.a_uio = auio;
7946 } else if (uio_resid(uio) > 0) {
7947 vwa.a_uio = uio;
7948 }
7949 if (vwa.a_uio) {
7950 error = nfs_vnop_write(&vwa);
7951 if (!error)
7952 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
7953 }
7954 doclose:
7955 /* Close the xattr. */
7956 if (nofp) {
7957 int busyerror = nfs_open_file_set_busy(nofp, NULL);
7958 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
7959 if (!busyerror)
7960 nfs_open_file_clear_busy(nofp);
7961 }
7962 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
7963 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
7964 if (error == ENOENT)
7965 error = 0;
7966 }
7967 if (!error)
7968 error = closeerror;
7969 out:
7970 if (anp)
7971 vnode_put(NFSTOV(anp));
7972 if (error == ENOENT)
7973 error = ENOATTR;
7974 return (error);
7975 }
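/*
 * One observable consequence of the FinderInfo handling above: writing 32
 * zero bytes is treated as a removal rather than a store.  Sketch (the
 * helper name is hypothetical):
 */
#if 0	/* illustrative userspace sketch -- not compiled */
#include <sys/xattr.h>
#include <string.h>

static int
clear_finderinfo(const char *path)
{
	char zeroes[32];	/* FINDERINFOSIZE */

	memset(zeroes, 0, sizeof(zeroes));
	/* an all-zero FinderInfo causes the named attribute to be removed */
	return (setxattr(path, XATTR_FINDERINFO_NAME, zeroes, sizeof(zeroes), 0, 0));
}
#endif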
7976
7977 int
7978 nfs4_vnop_removexattr(
7979 struct vnop_removexattr_args /* {
7980 struct vnodeop_desc *a_desc;
7981 vnode_t a_vp;
7982 const char * a_name;
7983 int a_options;
7984 vfs_context_t a_context;
7985 } */ *ap)
7986 {
7987 struct nfsmount *nmp = VTONMP(ap->a_vp);
7988 int error;
7989
7990 if (nfs_mount_gone(nmp))
7991 return (ENXIO);
7992 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
7993 return (ENOTSUP);
7994
7995 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
7996 if (error == ENOENT)
7997 error = ENOATTR;
7998 return (error);
7999 }
8000
8001 int
8002 nfs4_vnop_listxattr(
8003 struct vnop_listxattr_args /* {
8004 struct vnodeop_desc *a_desc;
8005 vnode_t a_vp;
8006 uio_t a_uio;
8007 size_t *a_size;
8008 int a_options;
8009 vfs_context_t a_context;
8010 } */ *ap)
8011 {
8012 vfs_context_t ctx = ap->a_context;
8013 nfsnode_t np = VTONFS(ap->a_vp);
8014 uio_t uio = ap->a_uio;
8015 nfsnode_t adnp = NULL;
8016 struct nfsmount *nmp;
8017 int error, done, i;
8018 struct nfs_vattr nvattr;
8019 uint64_t cookie, nextcookie, lbn = 0;
8020 struct nfsbuf *bp = NULL;
8021 struct nfs_dir_buf_header *ndbhp;
8022 struct direntry *dp;
8023
8024 nmp = VTONMP(ap->a_vp);
8025 if (nfs_mount_gone(nmp))
8026 return (ENXIO);
8027
8028 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8029 return (ENOTSUP);
8030
8031 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8032 if (error)
8033 return (error);
8034 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8035 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8036 return (0);
8037
8038 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx))))
8039 return (error);
8040 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8041 nfs_node_clear_busy(np);
8042 if (!adnp)
8043 goto out;
8044
8045 if ((error = nfs_node_lock(adnp)))
8046 goto out;
8047
8048 if (adnp->n_flag & NNEEDINVALIDATE) {
8049 adnp->n_flag &= ~NNEEDINVALIDATE;
8050 nfs_invaldir(adnp);
8051 nfs_node_unlock(adnp);
8052 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8053 if (!error)
8054 error = nfs_node_lock(adnp);
8055 if (error)
8056 goto out;
8057 }
8058
8059 /*
8060 * check for need to invalidate when (re)starting at beginning
8061 */
8062 if (adnp->n_flag & NMODIFIED) {
8063 nfs_invaldir(adnp);
8064 nfs_node_unlock(adnp);
8065 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1)))
8066 goto out;
8067 } else {
8068 nfs_node_unlock(adnp);
8069 }
8070 /* nfs_getattr() will check changed and purge caches */
8071 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED)))
8072 goto out;
8073
8074 if (uio && (uio_resid(uio) == 0))
8075 goto out;
8076
8077 done = 0;
8078 nextcookie = lbn = 0;
8079
8080 while (!error && !done) {
8081 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8082 cookie = nextcookie;
8083 getbuffer:
8084 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8085 if (error)
8086 goto out;
8087 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8088 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8089 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8090 ndbhp->ndbh_flags = 0;
8091 ndbhp->ndbh_count = 0;
8092 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8093 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8094 }
8095 error = nfs_buf_readdir(bp, ctx);
8096 if (error == NFSERR_DIRBUFDROPPED)
8097 goto getbuffer;
8098 if (error)
8099 nfs_buf_release(bp, 1);
8100 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8101 if (!nfs_node_lock(adnp)) {
8102 nfs_invaldir(adnp);
8103 nfs_node_unlock(adnp);
8104 }
8105 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8106 if (error == NFSERR_BAD_COOKIE)
8107 error = ENOENT;
8108 }
8109 if (error)
8110 goto out;
8111 }
8112
8113 /* go through all the entries copying/counting */
8114 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
8115 for (i=0; i < ndbhp->ndbh_count; i++) {
8116 if (!xattr_protected(dp->d_name)) {
8117 if (uio == NULL) {
8118 *ap->a_size += dp->d_namlen + 1;
8119 } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
8120 error = ERANGE;
8121 } else {
8122 error = uiomove(dp->d_name, dp->d_namlen+1, uio);
8123 if (error && (error != EFAULT))
8124 error = ERANGE;
8125 }
8126 }
8127 nextcookie = dp->d_seekoff;
8128 dp = NFS_DIRENTRY_NEXT(dp);
8129 }
8130
8131 if (i == ndbhp->ndbh_count) {
8132 /* hit end of buffer, move to next buffer */
8133 lbn = nextcookie;
8134 /* if we also hit EOF, we're done */
8135 if (ISSET(ndbhp->ndbh_flags, NDB_EOF))
8136 done = 1;
8137 }
8138 if (!error && !done && (nextcookie == cookie)) {
8139 printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
8140 error = EIO;
8141 }
8142 nfs_buf_release(bp, 1);
8143 }
8144 out:
8145 if (adnp)
8146 vnode_put(NFSTOV(adnp));
8147 return (error);
8148 }
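/*
 * The uiomove(dp->d_name, dp->d_namlen+1, uio) above emits each name with
 * its terminating NUL, which is exactly the packed list that listxattr(2)
 * returns to userspace.  A minimal consumer sketch (hypothetical helper):
 */
#if 0	/* illustrative userspace sketch -- not compiled */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
print_xattr_names(const char *path)
{
	ssize_t len = listxattr(path, NULL, 0, 0);	/* size-only query */
	if (len <= 0)
		return;
	char *buf = malloc(len);
	if (buf == NULL)
		return;
	len = listxattr(path, buf, len, 0);
	if (len > 0) {
		/* names are packed back to back, each NUL-terminated */
		for (char *name = buf; name < buf + len; name += strlen(name) + 1)
			printf("%s\n", name);
	}
	free(buf);
}
#endif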
8149
8150 #if NAMEDSTREAMS
8151 int
8152 nfs4_vnop_getnamedstream(
8153 struct vnop_getnamedstream_args /* {
8154 struct vnodeop_desc *a_desc;
8155 vnode_t a_vp;
8156 vnode_t *a_svpp;
8157 const char *a_name;
8158 enum nsoperation a_operation;
8159 int a_flags;
8160 vfs_context_t a_context;
8161 } */ *ap)
8162 {
8163 vfs_context_t ctx = ap->a_context;
8164 struct nfsmount *nmp;
8165 struct nfs_vattr nvattr;
8166 struct componentname cn;
8167 nfsnode_t anp;
8168 int error = 0;
8169
8170 nmp = VTONMP(ap->a_vp);
8171 if (nfs_mount_gone(nmp))
8172 return (ENXIO);
8173
8174 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8175 return (ENOTSUP);
8176 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8177 if (error)
8178 return (error);
8179 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8180 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS))
8181 return (ENOATTR);
8182
8183 bzero(&cn, sizeof(cn));
8184 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8185 cn.cn_namelen = strlen(ap->a_name);
8186 cn.cn_nameiop = LOOKUP;
8187 cn.cn_flags = MAKEENTRY;
8188
8189 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8190 0, ctx, &anp, NULL);
8191 if ((!error && !anp) || (error == ENOENT))
8192 error = ENOATTR;
8193 if (!error && anp)
8194 *ap->a_svpp = NFSTOV(anp);
8195 else if (anp)
8196 vnode_put(NFSTOV(anp));
8197 return (error);
8198 }
8199
8200 int
8201 nfs4_vnop_makenamedstream(
8202 struct vnop_makenamedstream_args /* {
8203 struct vnodeop_desc *a_desc;
8204 vnode_t *a_svpp;
8205 vnode_t a_vp;
8206 const char *a_name;
8207 int a_flags;
8208 vfs_context_t a_context;
8209 } */ *ap)
8210 {
8211 vfs_context_t ctx = ap->a_context;
8212 struct nfsmount *nmp;
8213 struct componentname cn;
8214 nfsnode_t anp;
8215 int error = 0;
8216
8217 nmp = VTONMP(ap->a_vp);
8218 if (nfs_mount_gone(nmp))
8219 return (ENXIO);
8220
8221 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8222 return (ENOTSUP);
8223
8224 bzero(&cn, sizeof(cn));
8225 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8226 cn.cn_namelen = strlen(ap->a_name);
8227 cn.cn_nameiop = CREATE;
8228 cn.cn_flags = MAKEENTRY;
8229
8230 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8231 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8232 if ((!error && !anp) || (error == ENOENT))
8233 error = ENOATTR;
8234 if (!error && anp)
8235 *ap->a_svpp = NFSTOV(anp);
8236 else if (anp)
8237 vnode_put(NFSTOV(anp));
8238 return (error);
8239 }
8240
8241 int
8242 nfs4_vnop_removenamedstream(
8243 struct vnop_removenamedstream_args /* {
8244 struct vnodeop_desc *a_desc;
8245 vnode_t a_vp;
8246 vnode_t a_svp;
8247 const char *a_name;
8248 int a_flags;
8249 vfs_context_t a_context;
8250 } */ *ap)
8251 {
8252 struct nfsmount *nmp = VTONMP(ap->a_vp);
8253 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8254 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8255
8256 if (nfs_mount_gone(nmp))
8257 return (ENXIO);
8258
8259 /*
8260 * Given that a_svp is a named stream, checking for
8261 * named attribute support is kinda pointless.
8262 */
8263 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR))
8264 return (ENOTSUP);
8265
8266 return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context));
8267 }
8268
8269 #endif