]> git.saurik.com Git - apple/xnu.git/blob - bsd/nfs/nfs4_vnops.c
xnu-1504.9.17.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
1 /*
2 * Copyright (c) 2006-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * vnode op calls for NFS version 4
31 */
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/systm.h>
35 #include <sys/resourcevar.h>
36 #include <sys/proc_internal.h>
37 #include <sys/kauth.h>
38 #include <sys/mount_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/kpi_mbuf.h>
41 #include <sys/conf.h>
42 #include <sys/vnode_internal.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/lockf.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/attr.h>
48 #include <sys/signalvar.h>
49 #include <sys/uio.h>
50
51 #include <vfs/vfs_support.h>
52
53 #include <sys/vm.h>
54
55 #include <sys/time.h>
56 #include <kern/clock.h>
57 #include <libkern/OSAtomic.h>
58
59 #include <miscfs/fifofs/fifo.h>
60 #include <miscfs/specfs/specdev.h>
61
62 #include <nfs/rpcv2.h>
63 #include <nfs/nfsproto.h>
64 #include <nfs/nfs.h>
65 #include <nfs/nfsnode.h>
66 #include <nfs/nfs_gss.h>
67 #include <nfs/nfsmount.h>
68 #include <nfs/nfs_lock.h>
69 #include <nfs/xdr_subs.h>
70 #include <nfs/nfsm_subs.h>
71
72 #include <net/if.h>
73 #include <netinet/in.h>
74 #include <netinet/in_var.h>
75 #include <vm/vm_kern.h>
76
77 #include <kern/task.h>
78 #include <kern/sched_prim.h>
79
80 int
81 nfs4_access_rpc(nfsnode_t np, u_int32_t *mode, vfs_context_t ctx)
82 {
83 int error = 0, lockerror = ENOENT, status, numops, slot;
84 u_int64_t xid;
85 struct nfsm_chain nmreq, nmrep;
86 struct timeval now;
87 uint32_t access = 0, supported = 0, missing;
88 struct nfsmount *nmp = NFSTONMP(np);
89 int nfsvers = nmp->nm_vers;
90 uid_t uid;
91
92 nfsm_chain_null(&nmreq);
93 nfsm_chain_null(&nmrep);
94
95 // PUTFH, ACCESS, GETATTR
96 numops = 3;
97 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
98 nfsm_chain_add_compound_header(error, &nmreq, "access", numops);
99 numops--;
100 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
101 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
102 numops--;
103 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
104 nfsm_chain_add_32(error, &nmreq, *mode);
105 numops--;
106 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
107 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
108 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
109 nfsm_chain_build_done(error, &nmreq);
110 nfsm_assert(error, (numops == 0), EPROTO);
111 nfsmout_if(error);
112 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);
113
114 if ((lockerror = nfs_node_lock(np)))
115 error = lockerror;
116 nfsm_chain_skip_tag(error, &nmrep);
117 nfsm_chain_get_32(error, &nmrep, numops);
118 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
119 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
120 nfsm_chain_get_32(error, &nmrep, supported);
121 nfsm_chain_get_32(error, &nmrep, access);
122 nfsmout_if(error);
123 if ((missing = (*mode & ~supported))) {
124 /* missing support for something(s) we wanted */
125 if (missing & NFS_ACCESS_DELETE) {
126 /*
127 * If the server doesn't report DELETE (possible
128 * on UNIX systems), we'll assume that it is OK
129 * and just let any subsequent delete action fail
130 * if it really isn't deletable.
131 */
132 access |= NFS_ACCESS_DELETE;
133 }
134 }
135 /* Some servers report DELETE support but erroneously give a denied answer. */
136 if ((*mode & NFS_ACCESS_DELETE) && nfs_access_delete && !(access & NFS_ACCESS_DELETE))
137 access |= NFS_ACCESS_DELETE;
138 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
139 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
140 nfsmout_if(error);
141
142 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
143 slot = nfs_node_mode_slot(np, uid, 1);
144 np->n_modeuid[slot] = uid;
145 microuptime(&now);
146 np->n_modestamp[slot] = now.tv_sec;
147 np->n_mode[slot] = access;
148
149 /* pass back the mode returned with this request */
150 *mode = np->n_mode[slot];
151 nfsmout:
152 if (!lockerror)
153 nfs_node_unlock(np);
154 nfsm_chain_cleanup(&nmreq);
155 nfsm_chain_cleanup(&nmrep);
156 return (error);
157 }
158
/*
 * NFSv4 GETATTR RPC.
 *
 * Fetch attributes for the file handle (fhp/fhsize) into *nvap.
 * The mount is located from 'mp' if given, otherwise from 'np' --
 * at least one must identify a live mount or ENXIO is returned.
 *
 * Compound sent: PUTFH, GETATTR.
 *
 * *xidp returns the transaction ID of the request (used by callers to
 * order attribute updates).
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;

	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* only ask for attributes the server claims to support */
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, mp, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	/* parse the returned fattr4 into the caller's nfs_vattr */
	NFS_CLEAR_ATTRIBUTES(nvap->nva_bitmap);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
208
/*
 * NFSv4 READLINK RPC.
 *
 * Read the target of symlink 'np' into 'buf'.  On entry *buflenp is the
 * buffer size; on success it is updated to the number of bytes copied.
 *
 * Compound sent: PUTFH, GETATTR, READLINK.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, NULL, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		/*
		 * Reply is longer than the caller's buffer: clamp to the cached
		 * link size if that fits, else to buflen-1 (presumably leaving
		 * room for a terminator -- NOTE(review): confirm against callers).
		 */
		if (np->n_size && (np->n_size < *buflenp))
			len = np->n_size;
		else
			len = *buflenp - 1;
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error)
		*buflenp = len;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
268
/*
 * NFSv4 READ RPC (asynchronous send half).
 *
 * Build and fire off an async READ of 'len' bytes at 'offset' for node
 * 'np', using the open/lock stateid appropriate for thread/cred.  The
 * in-flight request is returned through *reqp; the reply is parsed by
 * nfs4_read_rpc_async_finish().
 *
 * Compound sent: PUTFH, READ, GETATTR.
 */
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	/* pick the stateid to read under for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}
316
/*
 * NFSv4 READ RPC (asynchronous completion half).
 *
 * Wait for / collect the reply to an async READ issued by
 * nfs4_read_rpc_async(), copy up to *lenp bytes of returned data into
 * 'uio', and report EOF through *eofp (if non-NULL).
 *
 * On entry *lenp is the caller's maximum; on return it holds the number
 * of bytes actually transferred.  EINPROGRESS is passed through when the
 * async request was restarted and is still outstanding.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (!nmp) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never copy more than the caller asked for */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	if (eofp) {
		/* no eof flag and no data returned: treat as EOF */
		if (!eof && !retlen)
			eof = 1;
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
368
/*
 * NFSv4 WRITE RPC (asynchronous send half).
 *
 * Build and fire off an async WRITE of 'len' bytes from 'uio' at the
 * uio's current offset, with stability level 'iomode' (UNSTABLE/
 * DATASYNC/FILESYNC), under the stateid for thread/cred.  The in-flight
 * request is returned through *reqp; the reply is parsed by
 * nfs4_write_rpc_async_finish().
 *
 * Compound sent: PUTFH, WRITE, GETATTR.
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	/* request buffer must also hold the write payload itself */
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error)
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}
421
/*
 * NFSv4 WRITE RPC (asynchronous completion half).
 *
 * Wait for / collect the reply to an async WRITE issued by
 * nfs4_write_rpc_async().  Reports the number of bytes the server
 * accepted (*rlenp), the stability level the server actually committed
 * to (*iomodep), and the server's write verifier (*wverfp, if non-NULL).
 *
 * The verifier is also recorded in the mount so that a change of
 * verifier (server reboot) can be detected later.
 * EINPROGRESS is passed through when the async request was restarted.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (!nmp) {
		nfs_request_async_cancel(req);
		return (ENXIO);
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) /* async request restarted */
		return (error);
	/* re-check the mount: it may have gone away while we were waiting */
	nmp = NFSTONMP(np);
	if (!nmp)
		error = ENXIO;
	if (!error && (lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0)
		error = NFSERR_IO;
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp)
		*wverfp = wverf;
	/* remember the verifier; update it if the server's changed */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmrep);
	/*
	 * On an async (MNT_ASYNC) mount with async writes allowed, report
	 * FILESYNC regardless -- presumably so callers skip the later
	 * COMMIT step; NOTE(review): confirm against the write/commit callers.
	 */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC))
		committed = NFS_WRITE_FILESYNC;
	*iomodep = committed;
	return (error);
}
489
490 int
491 nfs4_remove_rpc(
492 nfsnode_t dnp,
493 char *name,
494 int namelen,
495 thread_t thd,
496 kauth_cred_t cred)
497 {
498 int error = 0, lockerror = ENOENT, remove_error = 0, status;
499 struct nfsmount *nmp;
500 int nfsvers, numops;
501 u_int64_t xid;
502 struct nfsm_chain nmreq, nmrep;
503
504 nmp = NFSTONMP(dnp);
505 if (!nmp)
506 return (ENXIO);
507 nfsvers = nmp->nm_vers;
508 restart:
509 nfsm_chain_null(&nmreq);
510 nfsm_chain_null(&nmrep);
511
512 // PUTFH, REMOVE, GETATTR
513 numops = 3;
514 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
515 nfsm_chain_add_compound_header(error, &nmreq, "remove", numops);
516 numops--;
517 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
518 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
519 numops--;
520 nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
521 nfsm_chain_add_string(error, &nmreq, name, namelen);
522 numops--;
523 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
524 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
525 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
526 nfsm_chain_build_done(error, &nmreq);
527 nfsm_assert(error, (numops == 0), EPROTO);
528 nfsmout_if(error);
529
530 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, 0, &nmrep, &xid, &status);
531
532 if ((lockerror = nfs_node_lock(dnp)))
533 error = lockerror;
534 nfsm_chain_skip_tag(error, &nmrep);
535 nfsm_chain_get_32(error, &nmrep, numops);
536 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
537 nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
538 remove_error = error;
539 nfsm_chain_check_change_info(error, &nmrep, dnp);
540 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
541 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
542 if (error && !lockerror)
543 NATTRINVALIDATE(dnp);
544 nfsmout:
545 nfsm_chain_cleanup(&nmreq);
546 nfsm_chain_cleanup(&nmrep);
547
548 if (!lockerror) {
549 dnp->n_flag |= NMODIFIED;
550 nfs_node_unlock(dnp);
551 }
552 if (error == NFSERR_GRACE) {
553 tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);
554 goto restart;
555 }
556
557 return (remove_error);
558 }
559
/*
 * NFSv4 RENAME RPC.
 *
 * Rename fname in directory fdnp to tname in directory tdnp.
 *
 * Compound sent:
 *   PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
 *
 * Both directories' attributes are refreshed from the reply; if either
 * parse fails, that directory's cached attributes are invalidated instead.
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;

	nmp = NFSTONMP(fdnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_string(error, &nmreq, fnameptr, fnamelen);
	nfsm_chain_add_string(error, &nmreq, tnameptr, tnamelen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* save the xid so both loadattr calls see the same request id */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, NULL, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(tdnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, NULL, &xid);
	if (error && !lockerror)
		NATTRINVALIDATE(fdnp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST)
		error = 0;
	return (error);
}
650
/*
 * NFS V4 readdir RPC.
 *
 * Fill directory buffer 'bp' with entries for directory 'dnp' by issuing
 * READDIR compounds (PUTFH, GETATTR, READDIR) until the buffer is full
 * or the server reports EOF.  In "readdirplus" mode (NFSMNT_RDIRPLUS)
 * each entry's attributes and file handle are packed into the buffer
 * alongside the direntry.
 *
 * The client synthesizes the "." and ".." entries at the start of the
 * first buffer (cookies 1 and 2); any "."/".." entries returned by the
 * server are skipped.
 *
 * If a reply overflows the current buffer, the remainder is continued
 * in a new buffer keyed by the last cookie seen; in that case the
 * original buffer has been released and NFSERR_DIRBUFDROPPED is
 * returned so the caller knows to restart with the new buffer.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;

	nmp = NFSTONMP(dnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	rdirplus = ((nfsvers > NFS_VER2) && (nmp->nm_flag & NFSMNT_RDIRPLUS)) ? 1 : 0;

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		for (i=0; i < NFS_ATTR_BITMAP_LEN; i++)
			entry_attrs[i] =
				nfs_getattr_bitmap[i] &
				nmp->nm_fsattr.nfsa_supp_attr[i];
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
	}
	/* XXX NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID); */
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp)))
		return (lockerror);

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		/* buffer already holds entries: resume after the last one */
		for (i=0; i < ndbhp->ndbh_count-1; i++)
			dp = NFS_DIRENTRY_NEXT(dp);
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		/* empty buffer: the buffer's block number is the starting cookie */
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 */
	if ((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) {
		fh.fh_len = 0;
		/* xlen = per-entry extra space: fh (length byte + data) + timestamp */
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen+1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen)
			bzero(&dp->d_name[namlen+1], xlen);
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen+1);
		/* fall back to our own fileid when no parent is cached */
		if (dnp->n_parent)
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		else
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0)
			bzero(padstart, padlen);
		if (rdirplus) /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {

		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
			NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
		/* cookies <= 2 belong to the synthesized "."/".."; server starts at 0 */
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap(error, &nmreq, entry_attrs, NFS_ATTR_BITMAP_LEN);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		/* drop the node lock across the RPC */
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		/* cookie verifier updated; unlock while packing entries */
		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus)
			microuptime(&now);

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name)-1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic(1, &nfsstats.readdir_bios);
			}
			/* save the parse position in case we must rewind to nextbuffer */
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen)
				nfsm_chain_adv(error, &nmrep,
					nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			NFS_CLEAR_ATTRIBUTES(nvattrp->nva_bitmap);
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL);
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we didn't get attributes, whatever... */
				if (rdirplus) /* mark the attributes invalid */
					bzero(nvattrp, sizeof(struct nfs_vattr));
				else
					NFS_CLEAR_ATTRIBUTES(nvattrp->nva_bitmap);
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			if ((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE))
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID))
				dp->d_fileno = nvattrp->nva_fileid;
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					/* recompute sizes with the real fh length */
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen+1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen+1], fhlen);
				}
				/* timestamp the entry's attributes */
				*(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0)
				bzero(padstart, padlen);
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			/* remember where the directory ends */
			ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			/* we moved to a new buffer; release it and report the drop */
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp)
		nfs_buf_release(bp, 0);
	if (!lockerror)
		nfs_node_unlock(dnp);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (bp_dropped ? NFSERR_DIRBUFDROPPED : error);
}
995
/*
 * Asynchronously send an NFSv4 LOOKUP compound request:
 *   PUTFH(dir), GETATTR(dir), LOOKUP(name)/LOOKUPP, [GETATTR(result+FH)]
 * The in-flight request is returned via *reqp and must be completed
 * with nfs4_lookup_rpc_async_finish().
 */
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, getattrs = 1, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;

	nmp = NFSTONMP(dnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/* ".." must use LOOKUPP (parent lookup), which takes no name argument */
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2))
		isdotdot = 1;

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETATTR (FH)
	numops = getattrs ? 4 : 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_string(error, &nmreq, name, namelen);
	}
	if (getattrs) {
		/* also ask for the looked-up node's attributes, including its file handle */
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
		NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
		nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
			NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
		vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return (error);
}
1054
1055 int
1056 nfs4_lookup_rpc_async_finish(
1057 nfsnode_t dnp,
1058 __unused vfs_context_t ctx,
1059 struct nfsreq *req,
1060 u_int64_t *xidp,
1061 fhandle_t *fhp,
1062 struct nfs_vattr *nvap)
1063 {
1064 int error = 0, lockerror = ENOENT, status, nfsvers, numops;
1065 uint32_t val = 0;
1066 u_int64_t xid;
1067 struct nfsmount *nmp;
1068 struct nfsm_chain nmrep;
1069
1070 nmp = NFSTONMP(dnp);
1071 nfsvers = nmp->nm_vers;
1072
1073 nfsm_chain_null(&nmrep);
1074
1075 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1076
1077 if ((lockerror = nfs_node_lock(dnp)))
1078 error = lockerror;
1079 nfsm_chain_skip_tag(error, &nmrep);
1080 nfsm_chain_get_32(error, &nmrep, numops);
1081 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1082 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1083 if (xidp)
1084 *xidp = xid;
1085 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
1086
1087 // nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
1088 nfsm_chain_get_32(error, &nmrep, val);
1089 nfsm_assert(error, (val == NFS_OP_LOOKUPP) || (val == NFS_OP_LOOKUP), EBADRPC);
1090 nfsm_chain_get_32(error, &nmrep, val);
1091 nfsm_assert(error, (val == NFS_OK), val);
1092
1093 nfsmout_if(error || !fhp || !nvap);
1094 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1095 nfsmout_if(error);
1096 NFS_CLEAR_ATTRIBUTES(nvap->nva_bitmap);
1097 error = nfs4_parsefattr(&nmrep, NULL, nvap, fhp, NULL);
1098 if (!NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1099 error = EBADRPC;
1100 goto nfsmout;
1101 }
1102 nfsmout:
1103 if (!lockerror)
1104 nfs_node_unlock(dnp);
1105 nfsm_chain_cleanup(&nmrep);
1106 return (error);
1107 }
1108
/*
 * NFSv4 COMMIT RPC: ask the server to commit previously unstable-written
 * data in [offset, offset+count) to stable storage, then compare the
 * returned write verifier against the one we have cached.  A changed
 * verifier means the server rebooted and uncommitted writes may have been
 * lost; NFSERR_STALEWRITEVERF tells the caller to rewrite the data.
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	u_int64_t offset,
	u_int64_t count,
	kauth_cred_t cred)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, wverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (!nmp)
		return (ENXIO);
	/* no write verifier yet means nothing has been written unstably */
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF))
		return (0);
	nfsvers = nmp->nm_vers;

	/* COMMIT's count is 32 bits on the wire; 0 means "to end of file" */
	if (count > UINT32_MAX)
		count32 = 0;
	else
		count32 = count;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
			current_thread(), cred, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
	if (!lockerror)
		nfs_node_unlock(np);
	nfsmout_if(error);
	/* verifier changed => server lost our unstable writes; caller must redo them */
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
1182
/*
 * Fetch pathconf-style limits for an NFSv4 node by requesting the
 * corresponding NFSv4 attributes (maxlink, maxname, no_trunc, etc.)
 * with a GETATTR.  Parsed results go into *nfsap; the regular node
 * attributes that come back are also loaded into the attribute cache.
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;

	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* usual attributes plus the pathconf-related ones */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL);
	nfsmout_if(error);
	/* opportunistically refresh the node's cached attributes as well */
	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	if (!error)
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	if (!lockerror)
		nfs_node_unlock(np);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
1246
1247 int
1248 nfs4_vnop_getattr(
1249 struct vnop_getattr_args /* {
1250 struct vnodeop_desc *a_desc;
1251 vnode_t a_vp;
1252 struct vnode_attr *a_vap;
1253 vfs_context_t a_context;
1254 } */ *ap)
1255 {
1256 struct vnode_attr *vap = ap->a_vap;
1257 struct nfs_vattr nva;
1258 int error;
1259
1260 error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED);
1261 if (error)
1262 return (error);
1263
1264 /* copy what we have in nva to *a_vap */
1265 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
1266 dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
1267 VATTR_RETURN(vap, va_rdev, rdev);
1268 }
1269 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS))
1270 VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
1271 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE))
1272 VATTR_RETURN(vap, va_data_size, nva.nva_size);
1273 // VATTR_RETURN(vap, va_data_alloc, ???);
1274 // VATTR_RETURN(vap, va_total_size, ???);
1275 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED))
1276 VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
1277 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER))
1278 VATTR_RETURN(vap, va_uid, nva.nva_uid);
1279 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP))
1280 VATTR_RETURN(vap, va_gid, nva.nva_gid);
1281 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE))
1282 VATTR_RETURN(vap, va_mode, nva.nva_mode);
1283 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
1284 NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN)) {
1285 uint32_t flags = 0;
1286 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE))
1287 flags |= SF_ARCHIVED;
1288 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN))
1289 flags |= UF_HIDDEN;
1290 VATTR_RETURN(vap, va_flags, flags);
1291 }
1292 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
1293 vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
1294 vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
1295 VATTR_SET_SUPPORTED(vap, va_create_time);
1296 }
1297 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
1298 vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
1299 vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
1300 VATTR_SET_SUPPORTED(vap, va_access_time);
1301 }
1302 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
1303 vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
1304 vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
1305 VATTR_SET_SUPPORTED(vap, va_modify_time);
1306 }
1307 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
1308 vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
1309 vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
1310 VATTR_SET_SUPPORTED(vap, va_change_time);
1311 }
1312 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
1313 vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
1314 vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
1315 VATTR_SET_SUPPORTED(vap, va_backup_time);
1316 }
1317 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID))
1318 VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
1319 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE))
1320 VATTR_RETURN(vap, va_type, nva.nva_type);
1321 if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE))
1322 VATTR_RETURN(vap, va_filerev, nva.nva_change);
1323
1324 // other attrs we might support someday:
1325 // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);
1326 // struct kauth_acl *va_acl; /* access control list */
1327 // guid_t va_uuuid; /* file owner UUID */
1328 // guid_t va_guuid; /* file group UUID */
1329
1330 return (error);
1331 }
1332
/*
 * NFSv4 SETATTR RPC: push the attributes in *vap to the server.
 * A size change (truncate) must carry a valid open/lock/delegation
 * stateid; other attribute changes use the all-zeros stateid.
 * On return, vap is updated to reflect which attributes the server
 * actually set, and the node's cached attributes are refreshed.
 */
int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	nfs_stateid stateid;

	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags)
			return (EINVAL);	/* return EINVAL if other attributes also set */
		else
			return (ENOTSUP);	/* return ENOTSUP for chflags(2) */
	}

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
	/* truncation needs a real stateid; other setattrs use the zero stateid */
	if (VATTR_IS_ACTIVE(vap, va_data_size))
		nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
	else
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
	/* the reply's bitmap says which attributes were actually set */
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsmout_if(error);
	nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
	if (error)
		NATTRINVALIDATE(np);
	/*
	 * We just changed the attributes and we want to make sure that we
	 * see the latest attributes.  Get the next XID.  If it's not the
	 * next XID after the SETATTR XID, then it's possible that another
	 * RPC was in flight at the same time and it might put stale attributes
	 * in the cache.  In that case, we invalidate the attributes and set
	 * the attribute cache XID to guarantee that newer attributes will
	 * get loaded next.
	 */
	nextxid = 0;
	nfs_get_xid(&nextxid);
	if (nextxid != (xid + 1)) {
		np->n_xid = nextxid;
		NATTRINVALIDATE(np);
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
1421
1422 /*
1423 * Wait for any pending recovery to complete.
1424 */
1425 int
1426 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1427 {
1428 struct timespec ts = { 1, 0 };
1429 int error = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
1430
1431 lck_mtx_lock(&nmp->nm_lock);
1432 while (nmp->nm_state & NFSSTA_RECOVER) {
1433 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1434 break;
1435 nfs_mount_sock_thread_wake(nmp);
1436 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1437 }
1438 lck_mtx_unlock(&nmp->nm_lock);
1439
1440 return (error);
1441 }
1442
1443 /*
1444 * We're about to use/manipulate NFS mount's open/lock state.
1445 * Wait for any pending state recovery to complete, then
1446 * mark the state as being in use (which will hold off
1447 * the recovery thread until we're done).
1448 */
1449 int
1450 nfs_mount_state_in_use_start(struct nfsmount *nmp)
1451 {
1452 struct timespec ts = { 1, 0 };
1453 int error = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
1454
1455 if (!nmp)
1456 return (ENXIO);
1457 lck_mtx_lock(&nmp->nm_lock);
1458 while (nmp->nm_state & NFSSTA_RECOVER) {
1459 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))
1460 break;
1461 nfs_mount_sock_thread_wake(nmp);
1462 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts);
1463 }
1464 if (!error)
1465 nmp->nm_stateinuse++;
1466 lck_mtx_unlock(&nmp->nm_lock);
1467
1468 return (error);
1469 }
1470
/*
 * We're done using/manipulating the NFS mount's open/lock
 * state.  If the given error indicates that recovery should
 * be performed, we'll initiate recovery.
 * Returns nonzero if the caller should restart the operation.
 */
int
nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
{
	int restart = nfs_mount_state_error_should_restart(error);

	if (!nmp)
		return (restart);
	lck_mtx_lock(&nmp->nm_lock);
	/* OLD_STATEID and GRACE are retryable without full state recovery */
	if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
		if (!(nmp->nm_state & NFSSTA_RECOVER)) {
			printf("nfs_mount_state_in_use_end: error %d, initiating recovery\n", error);
			nmp->nm_state |= NFSSTA_RECOVER;
			nfs_mount_sock_thread_wake(nmp);
		}
	}
	if (nmp->nm_stateinuse > 0)
		nmp->nm_stateinuse--;
	else
		panic("NFS mount state in use count underrun");
	/* the recovery thread waits for the in-use count to drain to zero */
	if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER))
		wakeup(&nmp->nm_stateinuse);
	lck_mtx_unlock(&nmp->nm_lock);
	/* server still in its grace period: delay a couple seconds before retrying */
	if (error == NFSERR_GRACE)
		tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz);

	return (restart);
}
1503
1504 /*
1505 * Does the error mean we should restart/redo a state-related operation?
1506 */
1507 int
1508 nfs_mount_state_error_should_restart(int error)
1509 {
1510 switch (error) {
1511 case NFSERR_STALE_STATEID:
1512 case NFSERR_STALE_CLIENTID:
1513 case NFSERR_ADMIN_REVOKED:
1514 case NFSERR_EXPIRED:
1515 case NFSERR_OLD_STATEID:
1516 case NFSERR_BAD_STATEID:
1517 case NFSERR_GRACE:
1518 return (1);
1519 }
1520 return (0);
1521 }
1522
1523 /*
1524 * In some cases we may want to limit how many times we restart a
1525 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1526 * Base the limit on the lease (as long as it's not too short).
1527 */
1528 uint
1529 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1530 {
1531 return (MAX(nmp->nm_fsattr.nfsa_lease, 60));
1532 }
1533
1534
1535 /*
1536 * Mark an NFS node's open state as busy.
1537 */
1538 int
1539 nfs_open_state_set_busy(nfsnode_t np, vfs_context_t ctx)
1540 {
1541 struct nfsmount *nmp;
1542 thread_t thd = vfs_context_thread(ctx);
1543 struct timespec ts = {2, 0};
1544 int error = 0, slpflag;
1545
1546 nmp = NFSTONMP(np);
1547 if (!nmp)
1548 return (ENXIO);
1549 slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
1550
1551 lck_mtx_lock(&np->n_openlock);
1552 while (np->n_openflags & N_OPENBUSY) {
1553 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1554 break;
1555 np->n_openflags |= N_OPENWANT;
1556 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1557 }
1558 if (!error)
1559 np->n_openflags |= N_OPENBUSY;
1560 lck_mtx_unlock(&np->n_openlock);
1561
1562 return (error);
1563 }
1564
1565 /*
1566 * Clear an NFS node's open state busy flag and wake up
1567 * anyone wanting it.
1568 */
1569 void
1570 nfs_open_state_clear_busy(nfsnode_t np)
1571 {
1572 int wanted;
1573
1574 lck_mtx_lock(&np->n_openlock);
1575 if (!(np->n_openflags & N_OPENBUSY))
1576 panic("nfs_open_state_clear_busy");
1577 wanted = (np->n_openflags & N_OPENWANT);
1578 np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT);
1579 lck_mtx_unlock(&np->n_openlock);
1580 if (wanted)
1581 wakeup(&np->n_openflags);
1582 }
1583
/*
 * Search a mount's open owner list for the owner for this credential.
 * If not found and "alloc" is set, then allocate a new one.
 *
 * Uses an optimistic allocate-then-recheck pattern: the mount lock is
 * dropped for the allocation, so after allocating we search again in
 * case someone else inserted a matching owner in the meantime.
 * The returned owner (if any) has a reference taken on it.
 */
struct nfs_open_owner *
nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
{
	uid_t uid = kauth_cred_getuid(cred);
	struct nfs_open_owner *noop, *newnoop = NULL;

tryagain:
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
		if (kauth_cred_getuid(noop->noo_cred) == uid)
			break;
	}

	if (!noop && !newnoop && alloc) {
		/* none found: allocate one outside the mount lock, then re-search */
		lck_mtx_unlock(&nmp->nm_lock);
		MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
		if (!newnoop)
			return (NULL);
		bzero(newnoop, sizeof(*newnoop));
		lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnoop->noo_mount = nmp;
		kauth_cred_ref(cred);
		newnoop->noo_cred = cred;
		/* unique owner name for the OPEN on-the-wire owner identifier */
		newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
		TAILQ_INIT(&newnoop->noo_opens);
		goto tryagain;
	}
	if (!noop && newnoop) {
		/* still no match on the re-search: link in the one we allocated */
		newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
		TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
		noop = newnoop;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* lost the race: someone else inserted a match, so discard ours */
	if (newnoop && (noop != newnoop))
		nfs_open_owner_destroy(newnoop);

	if (noop)
		nfs_open_owner_ref(noop);

	return (noop);
}
1630
1631 /*
1632 * destroy an open owner that's no longer needed
1633 */
1634 void
1635 nfs_open_owner_destroy(struct nfs_open_owner *noop)
1636 {
1637 if (noop->noo_cred)
1638 kauth_cred_unref(&noop->noo_cred);
1639 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
1640 FREE(noop, M_TEMP);
1641 }
1642
1643 /*
1644 * acquire a reference count on an open owner
1645 */
1646 void
1647 nfs_open_owner_ref(struct nfs_open_owner *noop)
1648 {
1649 lck_mtx_lock(&noop->noo_lock);
1650 noop->noo_refcnt++;
1651 lck_mtx_unlock(&noop->noo_lock);
1652 }
1653
1654 /*
1655 * drop a reference count on an open owner and destroy it if
1656 * it is no longer referenced and no longer on the mount's list.
1657 */
1658 void
1659 nfs_open_owner_rele(struct nfs_open_owner *noop)
1660 {
1661 lck_mtx_lock(&noop->noo_lock);
1662 if (noop->noo_refcnt < 1)
1663 panic("nfs_open_owner_rele: no refcnt");
1664 noop->noo_refcnt--;
1665 if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1666 panic("nfs_open_owner_rele: busy");
1667 /* XXX we may potentially want to clean up idle/unused open owner structures */
1668 if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
1669 lck_mtx_unlock(&noop->noo_lock);
1670 return;
1671 }
1672 /* owner is no longer referenced or linked to mount, so destroy it */
1673 lck_mtx_unlock(&noop->noo_lock);
1674 nfs_open_owner_destroy(noop);
1675 }
1676
1677 /*
1678 * Mark an open owner as busy because we are about to
1679 * start an operation that uses and updates open owner state.
1680 */
1681 int
1682 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
1683 {
1684 struct nfsmount *nmp;
1685 struct timespec ts = {2, 0};
1686 int error = 0, slpflag;
1687
1688 nmp = noop->noo_mount;
1689 if (!nmp)
1690 return (ENXIO);
1691 slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
1692
1693 lck_mtx_lock(&noop->noo_lock);
1694 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
1695 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1696 break;
1697 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
1698 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
1699 }
1700 if (!error)
1701 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
1702 lck_mtx_unlock(&noop->noo_lock);
1703
1704 return (error);
1705 }
1706
1707 /*
1708 * Clear the busy flag on an open owner and wake up anyone waiting
1709 * to mark it busy.
1710 */
1711 void
1712 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
1713 {
1714 int wanted;
1715
1716 lck_mtx_lock(&noop->noo_lock);
1717 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY))
1718 panic("nfs_open_owner_clear_busy");
1719 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
1720 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT);
1721 lck_mtx_unlock(&noop->noo_lock);
1722 if (wanted)
1723 wakeup(noop);
1724 }
1725
1726 /*
1727 * Given an open/lock owner and an error code, increment the
1728 * sequence ID if appropriate.
1729 */
1730 void
1731 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
1732 {
1733 switch (error) {
1734 case NFSERR_STALE_CLIENTID:
1735 case NFSERR_STALE_STATEID:
1736 case NFSERR_OLD_STATEID:
1737 case NFSERR_BAD_STATEID:
1738 case NFSERR_BAD_SEQID:
1739 case NFSERR_BADXDR:
1740 case NFSERR_RESOURCE:
1741 case NFSERR_NOFILEHANDLE:
1742 /* do not increment the open seqid on these errors */
1743 return;
1744 }
1745 if (noop)
1746 noop->noo_seqid++;
1747 if (nlop)
1748 nlop->nlo_seqid++;
1749 }
1750
/*
 * Search a node's open file list for any conflicts with this request.
 * Also find this open owner's open file structure.
 * If not found and "alloc" is set, then allocate one.
 *
 * Returns EACCES on a local share-mode conflict, ENOMEM if allocation
 * fails, ESRCH if no open file was found (and none was allocated),
 * and 0 with *nofpp set on success.
 * Note: np may be NULL, in which case a new structure is always
 * allocated (via the "goto alloc" entry into the allocation path).
 */
int
nfs_open_file_find(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;

	if (!np)
		goto alloc;
tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
		if (nofp2->nof_owner == noop) {
			nofp = nofp2;
			/* with no mode to check, the owner match is all we need */
			if (!accessMode)
				break;
		}
		/* share-mode check: our access vs their deny, our deny vs their access */
		if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
			/* This request conflicts with an existing open on this client. */
			lck_mtx_unlock(&np->n_openlock);
			*nofpp = NULL;
			return (EACCES);
		}
	}

	/*
	 * If this open owner doesn't have an open
	 * file structure yet, we create one for it.
	 */
	if (!nofp && !newnofp && alloc) {
		/* drop the node lock to allocate, then re-search (tryagain) */
		lck_mtx_unlock(&np->n_openlock);
alloc:
		MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
		if (!newnofp) {
			*nofpp = NULL;
			return (ENOMEM);
		}
		bzero(newnofp, sizeof(*newnofp));
		lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnofp->nof_owner = noop;
		nfs_open_owner_ref(noop);
		newnofp->nof_np = np;
		lck_mtx_lock(&noop->noo_lock);
		TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
		lck_mtx_unlock(&noop->noo_lock);
		if (np)
			goto tryagain;
	}
	if (!nofp && newnofp) {
		/* use the one we allocated; link it into the node's open list */
		if (np)
			TAILQ_INSERT_HEAD(&np->n_opens, newnofp, nof_link);
		nofp = newnofp;
	}
	if (np)
		lck_mtx_unlock(&np->n_openlock);

	/* lost the race: an open file appeared during allocation, discard ours */
	if (newnofp && (nofp != newnofp))
		nfs_open_file_destroy(newnofp);

	*nofpp = nofp;
	return (nofp ? 0 : ESRCH);
}
1822
1823 /*
1824 * Destroy an open file structure.
1825 */
1826 void
1827 nfs_open_file_destroy(struct nfs_open_file *nofp)
1828 {
1829 lck_mtx_lock(&nofp->nof_owner->noo_lock);
1830 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
1831 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
1832 nfs_open_owner_rele(nofp->nof_owner);
1833 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
1834 FREE(nofp, M_TEMP);
1835 }
1836
1837 /*
1838 * Mark an open file as busy because we are about to
1839 * start an operation that uses and updates open file state.
1840 */
1841 int
1842 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
1843 {
1844 struct nfsmount *nmp;
1845 struct timespec ts = {2, 0};
1846 int error = 0, slpflag;
1847
1848 nmp = nofp->nof_owner->noo_mount;
1849 if (!nmp)
1850 return (ENXIO);
1851 slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
1852
1853 lck_mtx_lock(&nofp->nof_lock);
1854 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
1855 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
1856 break;
1857 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
1858 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
1859 }
1860 if (!error)
1861 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
1862 lck_mtx_unlock(&nofp->nof_lock);
1863
1864 return (error);
1865 }
1866
1867 /*
1868 * Clear the busy flag on an open file and wake up anyone waiting
1869 * to mark it busy.
1870 */
1871 void
1872 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
1873 {
1874 int wanted;
1875
1876 lck_mtx_lock(&nofp->nof_lock);
1877 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY))
1878 panic("nfs_open_file_clear_busy");
1879 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
1880 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT);
1881 lck_mtx_unlock(&nofp->nof_lock);
1882 if (wanted)
1883 wakeup(nofp);
1884 }
1885
1886 /*
1887 * Get the current (delegation, lock, open, default) stateid for this node.
1888 * If node has a delegation, use that stateid.
1889 * If pid has a lock, use the lockowner's stateid.
1890 * Or use the open file's stateid.
1891 * If no open file, use a default stateid of all ones.
1892 */
1893 void
1894 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
1895 {
1896 struct nfsmount *nmp = NFSTONMP(np);
1897 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_thread(); // XXX async I/O requests don't have a thread
1898 struct nfs_open_owner *noop = NULL;
1899 struct nfs_open_file *nofp = NULL;
1900 struct nfs_lock_owner *nlop = NULL;
1901 nfs_stateid *s = NULL;
1902
1903 if (np->n_openflags & N_DELEG_MASK)
1904 s = &np->n_dstateid;
1905 else if (p)
1906 nlop = nfs_lock_owner_find(np, p, 0);
1907 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
1908 /* we hold locks, use lock stateid */
1909 s = &nlop->nlo_stateid;
1910 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
1911 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
1912 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
1913 nofp->nof_access) {
1914 /* we (should) have the file open, use open stateid */
1915 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)
1916 nfs4_reopen(nofp, thd);
1917 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
1918 s = &nofp->nof_stateid;
1919 }
1920
1921 if (s) {
1922 sid->seqid = s->seqid;
1923 sid->other[0] = s->other[0];
1924 sid->other[1] = s->other[1];
1925 sid->other[2] = s->other[2];
1926 } else {
1927 const char *vname = vnode_getname(NFSTOV(np));
1928 printf("nfs_get_stateid: no stateid for %s\n", vname ? vname : "???");
1929 vnode_putname(vname);
1930 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
1931 }
1932 if (nlop)
1933 nfs_lock_owner_rele(nlop);
1934 if (noop)
1935 nfs_open_owner_rele(noop);
1936 }
1937
1938 /*
1939 * We always send the open RPC even if this open's mode is a subset of all
1940 * the existing opens. This makes sure that we will always be able to do a
1941 * downgrade to any of the open modes.
1942 *
1943 * Note: local conflicts should have already been checked. (nfs_open_file_find)
1944 */
1945 int
1946 nfs4_open(
1947 nfsnode_t np,
1948 struct nfs_open_file *nofp,
1949 uint32_t accessMode,
1950 uint32_t denyMode,
1951 vfs_context_t ctx)
1952 {
1953 vnode_t vp = NFSTOV(np);
1954 vnode_t dvp = NULL;
1955 struct componentname cn;
1956 const char *vname = NULL;
1957 size_t namelen;
1958 char smallname[128];
1959 char *filename = NULL;
1960 int error = 0, readtoo = 0;
1961
1962 dvp = vnode_getparent(vp);
1963 vname = vnode_getname(vp);
1964 if (!dvp || !vname) {
1965 error = EIO;
1966 goto out;
1967 }
1968 filename = &smallname[0];
1969 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
1970 if (namelen >= sizeof(smallname)) {
1971 namelen++; /* snprintf result doesn't include '\0' */
1972 MALLOC(filename, char *, namelen, M_TEMP, M_WAITOK);
1973 if (!filename) {
1974 error = ENOMEM;
1975 goto out;
1976 }
1977 snprintf(filename, namelen, "%s", vname);
1978 }
1979 bzero(&cn, sizeof(cn));
1980 cn.cn_nameptr = filename;
1981 cn.cn_namelen = namelen;
1982
1983 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
1984 /*
1985 * Try to open it for read access too,
1986 * so the buffer cache can read data.
1987 */
1988 readtoo = 1;
1989 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
1990 }
1991 tryagain:
1992 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
1993 if (error) {
1994 if (!nfs_mount_state_error_should_restart(error) && readtoo) {
1995 /* try again without the extra read access */
1996 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
1997 readtoo = 0;
1998 goto tryagain;
1999 }
2000 goto out;
2001 }
2002 nofp->nof_access |= accessMode;
2003 nofp->nof_deny |= denyMode;
2004
2005 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2006 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2007 nofp->nof_r++;
2008 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2009 nofp->nof_w++;
2010 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2011 nofp->nof_rw++;
2012 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2013 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2014 nofp->nof_r_dw++;
2015 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2016 nofp->nof_w_dw++;
2017 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2018 nofp->nof_rw_dw++;
2019 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2020 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ)
2021 nofp->nof_r_drw++;
2022 else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE)
2023 nofp->nof_w_drw++;
2024 else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH)
2025 nofp->nof_rw_drw++;
2026 }
2027 nofp->nof_opencnt++;
2028 out:
2029 if (filename && (filename != &smallname[0]))
2030 FREE(filename, M_TEMP);
2031 if (vname)
2032 vnode_putname(vname);
2033 if (dvp != NULLVP)
2034 vnode_put(dvp);
2035 return (error);
2036 }
2037
2038
/*
 * NFSv4 open vnode op.
 *
 * Runs the common (v3) open code first, then acquires/updates an NFSv4
 * open file for the caller's open owner, issuing an OPEN RPC as needed.
 * An open performed at create time may be consumed or downgraded here.
 */
int
nfs4_vnop_open(
	struct vnop_open_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		int a_mode;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp;
	int error, accessMode, denyMode, opened = 0;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	/* must be opening for read and/or write */
	if (!(ap->a_mode & (FREAD|FWRITE)))
		return (EINVAL);

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	/* First, call the common code */
	if ((error = nfs3_vnop_open(ap)))
		return (error);

	if (!vnode_isreg(vp)) {
		/* Just mark that it was opened */
		lck_mtx_lock(&np->n_openlock);
		np->n_openrefcnt++;
		lck_mtx_unlock(&np->n_openlock);
		return (0);
	}

	/* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */
	accessMode = 0;
	if (ap->a_mode & FREAD)
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	if (ap->a_mode & FWRITE)
		accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
	if (ap->a_mode & O_EXLOCK)
		denyMode = NFS_OPEN_SHARE_DENY_BOTH;
	else if (ap->a_mode & O_SHLOCK)
		denyMode = NFS_OPEN_SHARE_DENY_WRITE;
	else
		denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* find (or create) the open owner for this cred */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);

restart:
	/* hold off mount state recovery while we use the open state */
	error = nfs_mount_state_in_use_start(nmp);
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}

	error = nfs_open_file_find(np, noop, &nofp, accessMode, denyMode, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		/* NOTE(review): vname may be NULL here; other call sites guard with "???" */
		const char *vname = vnode_getname(NFSTOV(np));
		printf("nfs_vnop_open: LOST %s\n", vname);
		vnode_putname(vname);
		error = EIO;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* the open was invalidated by recovery: reopen it and retry */
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		goto restart;
	}
	if (!error)
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nofp = NULL;
		goto out;
	}

	/*
	 * If we just created the file and the modes match, then we simply use
	 * the open performed in the create. Otherwise, send the request.
	 */
	if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
	    (nofp->nof_creator == current_thread()) &&
	    (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) &&
	    (denyMode == NFS_OPEN_SHARE_DENY_NONE)) {
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
	} else {
		if (!opened)
			error = nfs4_open(np, nofp, accessMode, denyMode, ctx);
		if ((error == EACCES) && (nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
		    (nofp->nof_creator == current_thread())) {
			/*
			 * Ugh. This can happen if we just created the file with read-only
			 * perms and we're trying to open it for real with different modes
			 * (e.g. write-only or with a deny mode) and the server decides to
			 * not allow the second open because of the read-only perms.
			 * The best we can do is to just use the create's open.
			 * We may have access we don't need or we may not have a requested
			 * deny mode. We may log complaints later, but we'll try to avoid it.
			 */
			if (denyMode != NFS_OPEN_SHARE_DENY_NONE) {
				const char *vname = vnode_getname(NFSTOV(np));
				printf("nfs4_vnop_open: deny mode foregone on create, %s\n", vname);
				vnode_putname(vname);
			}
			nofp->nof_creator = NULL;
			error = 0;
		}
		if (error)
			goto out;
		opened = 1;
		/*
		 * If we had just created the file, we already had it open.
		 * If the actual open mode is less than what we grabbed at
		 * create time, then we'll downgrade the open here.
		 */
		if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) &&
		    (nofp->nof_creator == current_thread())) {
			/* release the create-time open reference (RW, deny none) */
			error = nfs4_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
			if (error) {
				const char *vname = vnode_getname(NFSTOV(np));
				printf("nfs_vnop_open: create close error %d, %s\n", error, vname);
				vnode_putname(vname);
			}
			if (!nfs_mount_state_error_should_restart(error)) {
				error = 0;
				nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
			}
		}
	}

out:
	if (nofp)
		nfs_open_file_clear_busy(nofp);
	/* if recovery kicked in while we were busy, restart the whole open */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	if (error) {
		const char *vname = vnode_getname(NFSTOV(np));
		printf("nfs_vnop_open: error %d, %s\n", error, vname);
		vnode_putname(vname);
	}
	return (error);
}
2190
2191 int
2192 nfs4_close(
2193 nfsnode_t np,
2194 struct nfs_open_file *nofp,
2195 uint32_t accessMode,
2196 uint32_t denyMode,
2197 vfs_context_t ctx)
2198 {
2199 struct nfs_lock_owner *nlop;
2200 int error = 0, changed = 0, closed = 0;
2201 uint32_t newAccessMode, newDenyMode;
2202
2203 /* warn if modes don't match current state */
2204 if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) {
2205 const char *vname = vnode_getname(NFSTOV(np));
2206 printf("nfs4_close: mode mismatch %d %d, current %d %d, %s\n",
2207 accessMode, denyMode, nofp->nof_access, nofp->nof_deny, vname);
2208 vnode_putname(vname);
2209 }
2210
2211 /*
2212 * If we're closing a write-only open, we may not have a write-only count
2213 * if we also grabbed read access. So, check the read-write count.
2214 */
2215 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2216 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
2217 (nofp->nof_w == 0) && nofp->nof_rw)
2218 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2219 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2220 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
2221 (nofp->nof_w_dw == 0) && nofp->nof_rw_dw)
2222 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2223 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2224 if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) &&
2225 (nofp->nof_w_drw == 0) && nofp->nof_rw_drw)
2226 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
2227 }
2228
2229 /*
2230 * Calculate new modes: a mode bit gets removed when there's only
2231 * one count in all the corresponding counts
2232 */
2233 newAccessMode = nofp->nof_access;
2234 newDenyMode = nofp->nof_deny;
2235 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2236 (newAccessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2237 ((nofp->nof_r + nofp->nof_rw + nofp->nof_r_dw +
2238 nofp->nof_rw_dw + nofp->nof_r_drw + nofp->nof_rw_dw) == 1)) {
2239 newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2240 changed = 1;
2241 }
2242 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2243 (newAccessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2244 ((nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw +
2245 nofp->nof_rw_dw + nofp->nof_w_drw + nofp->nof_rw_dw) == 1)) {
2246 newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2247 changed = 1;
2248 }
2249 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2250 (newDenyMode & NFS_OPEN_SHARE_DENY_READ) &&
2251 ((nofp->nof_r_drw + nofp->nof_w_drw + nofp->nof_rw_drw) == 1)) {
2252 newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2253 changed = 1;
2254 }
2255 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2256 (newDenyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2257 ((nofp->nof_r_drw + nofp->nof_w_drw + nofp->nof_rw_drw +
2258 nofp->nof_r_dw + nofp->nof_w_dw + nofp->nof_rw_dw) == 1)) {
2259 newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2260 changed = 1;
2261 }
2262
2263
2264 if ((newAccessMode == 0) || (nofp->nof_opencnt == 1)) {
2265 /*
2266 * No more access after this close, so clean up and close it.
2267 */
2268 closed = 1;
2269 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2270 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
2271 if (error == NFSERR_LOCKS_HELD) {
2272 /*
2273 * Hmm... the server says we have locks we need to release first
2274 * Find the lock owner and try to unlock everything.
2275 */
2276 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 0);
2277 if (nlop) {
2278 nfs4_unlock_rpc(np, nlop, F_WRLCK, 0, UINT64_MAX, ctx);
2279 nfs_lock_owner_rele(nlop);
2280 }
2281 error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0);
2282 }
2283 } else if (changed) {
2284 /*
2285 * File is still open but with less access, so downgrade the open.
2286 */
2287 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST))
2288 error = nfs4_open_downgrade_rpc(np, nofp, ctx);
2289 }
2290
2291 if (error) {
2292 const char *vname = vnode_getname(NFSTOV(np));
2293 printf("nfs4_close: error %d, %s\n", error, vname);
2294 vnode_putname(vname);
2295 return (error);
2296 }
2297
2298 /* Decrement the corresponding open access/deny mode counter. */
2299 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2300 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2301 if (nofp->nof_r == 0)
2302 printf("nfs4_close: open(R) count underrun\n");
2303 else
2304 nofp->nof_r--;
2305 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2306 if (nofp->nof_w == 0)
2307 printf("nfs4_close: open(W) count underrun\n");
2308 else
2309 nofp->nof_w--;
2310 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2311 if (nofp->nof_rw == 0)
2312 printf("nfs4_close: open(RW) count underrun\n");
2313 else
2314 nofp->nof_rw--;
2315 }
2316 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2317 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2318 if (nofp->nof_r_dw == 0)
2319 printf("nfs4_close: open(R,DW) count underrun\n");
2320 else
2321 nofp->nof_r_dw--;
2322 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2323 if (nofp->nof_w_dw == 0)
2324 printf("nfs4_close: open(W,DW) count underrun\n");
2325 else
2326 nofp->nof_w_dw--;
2327 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2328 if (nofp->nof_rw_dw == 0)
2329 printf("nfs4_close: open(RW,DW) count underrun\n");
2330 else
2331 nofp->nof_rw_dw--;
2332 }
2333 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2334 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2335 if (nofp->nof_r_drw == 0)
2336 printf("nfs4_close: open(R,DRW) count underrun\n");
2337 else
2338 nofp->nof_r_drw--;
2339 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2340 if (nofp->nof_w_drw == 0)
2341 printf("nfs4_close: open(W,DRW) count underrun\n");
2342 else
2343 nofp->nof_w_drw--;
2344 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2345 if (nofp->nof_rw_drw == 0)
2346 printf("nfs4_close: open(RW,DRW) count underrun\n");
2347 else
2348 nofp->nof_rw_drw--;
2349 }
2350 }
2351 /* update the modes */
2352 nofp->nof_access = newAccessMode;
2353 nofp->nof_deny = newDenyMode;
2354 if (closed) {
2355 if (nofp->nof_r || nofp->nof_w ||
2356 (nofp->nof_rw && !((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && !nofp->nof_creator && (nofp->nof_rw == 1))) ||
2357 nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw ||
2358 nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw)
2359 printf("nfs4_close: unexpected count: %u %u %u dw %u %u %u drw %u %u %u flags 0x%x\n",
2360 nofp->nof_r, nofp->nof_w, nofp->nof_rw,
2361 nofp->nof_r_dw, nofp->nof_w_dw, nofp->nof_rw_dw,
2362 nofp->nof_r_drw, nofp->nof_w_drw, nofp->nof_rw_drw,
2363 nofp->nof_flags);
2364 /* clear out all open info, just to be safe */
2365 nofp->nof_access = nofp->nof_deny = 0;
2366 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
2367 nofp->nof_r = nofp->nof_w = nofp->nof_rw = 0;
2368 nofp->nof_r_dw = nofp->nof_w_dw = nofp->nof_rw_dw = 0;
2369 nofp->nof_r_drw = nofp->nof_w_drw = nofp->nof_rw_drw = 0;
2370 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
2371 /* XXX we may potentially want to clean up idle/unused open file structures */
2372 }
2373 nofp->nof_opencnt--;
2374 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
2375 error = EIO;
2376 if (!nofp->nof_opencnt)
2377 nofp->nof_flags &= ~NFS_OPEN_FILE_LOST;
2378 const char *vname = vnode_getname(NFSTOV(np));
2379 printf("nfs_close: LOST%s, %s\n", !(nofp->nof_flags & NFS_OPEN_FILE_LOST) ? " (last)" : "", vname);
2380 vnode_putname(vname);
2381 }
2382 return (error);
2383 }
2384
/*
 * NFSv4 close vnode op.
 *
 * Runs the common (v3) close code first, then releases this open's
 * NFSv4 open reference (access/deny modes) via nfs4_close().
 */
int
nfs4_vnop_close(
	struct vnop_close_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		int a_fflag;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	int fflag = ap->a_fflag;
	int error, common_error, accessMode, denyMode;
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	/* First, call the common code */
	common_error = nfs3_vnop_close(ap);

	if (!vnode_isreg(vp)) {
		/* Just mark that it was closed */
		lck_mtx_lock(&np->n_openlock);
		np->n_openrefcnt--;
		lck_mtx_unlock(&np->n_openlock);
		return (common_error);
	}

	/* the open owner should already exist from the open */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
	if (!noop) {
		printf("nfs4_vnop_close: can't get open owner!\n");
		return (EIO);
	}

restart:
	/* hold off mount state recovery while we use the open state */
	error = nfs_mount_state_in_use_start(nmp);
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}

	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* the open was invalidated by recovery: reopen it and retry */
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		goto restart;
	}
	if (error) {
		const char *vname = vnode_getname(NFSTOV(np));
		printf("nfs4_vnop_close: no open file for owner %d, %s\n", error, vname);
		vnode_putname(vname);
		error = EBADF;
		goto out;
	}
	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nofp = NULL;
		goto out;
	}

	/* fflag contains some combination of: FREAD, FWRITE, FHASLOCK */
	accessMode = 0;
	if (fflag & FREAD)
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	if (fflag & FWRITE)
		accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
	// XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open
	// if (fflag & O_EXLOCK)
	//	denyMode = NFS_OPEN_SHARE_DENY_BOTH;
	// else if (fflag & O_SHLOCK)
	//	denyMode = NFS_OPEN_SHARE_DENY_WRITE;
	// else
	//	denyMode = NFS_OPEN_SHARE_DENY_NONE;
	if (fflag & FHASLOCK) {
		/* XXX assume FHASLOCK is for the deny mode and not flock */
		/* FHASLOCK flock will be unlocked in the close path, but the flag is not cleared. */
		if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ)
			denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		else if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE)
			denyMode = NFS_OPEN_SHARE_DENY_WRITE;
		else
			denyMode = NFS_OPEN_SHARE_DENY_NONE;
	} else {
		denyMode = NFS_OPEN_SHARE_DENY_NONE;
	}

	if (!accessMode) {
		error = EINVAL;
		goto out;
	}

	error = nfs4_close(np, nofp, accessMode, denyMode, ctx);
	if (error) {
		const char *vname = vnode_getname(NFSTOV(np));
		printf("nfs_vnop_close: close error %d, %s\n", error, vname);
		vnode_putname(vname);
	}

out:
	if (nofp)
		nfs_open_file_clear_busy(nofp);
	/* if recovery kicked in while we were busy, restart the whole close */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	if (error) {
		const char *vname = vnode_getname(NFSTOV(np));
		printf("nfs_vnop_close: error %d, %s\n", error, vname);
		vnode_putname(vname);
	}
	/* the common (v3) close error is reported only if the v4 close succeeded */
	if (!error)
		error = common_error;
	return (error);
}
2507
/*
 * NFSv4 mmap vnode op.
 *
 * Takes an additional open reference that mirrors an existing open,
 * so the open state backing the mapping can be reclaimed after the
 * file descriptor itself is closed.
 */
int
nfs4_vnop_mmap(
	struct vnop_mmap_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		int a_fflags;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	int error = 0, accessMode, denyMode;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE)))
		return (EINVAL);

	/*
	 * fflags contains some combination of: PROT_READ, PROT_WRITE
	 * Since it's not possible to mmap() without having the file open for reading,
	 * read access is always there (regardless if PROT_READ is not set).
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_READ;
	if (ap->a_fflags & PROT_WRITE)
		accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* the open owner should already exist from the open */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
	if (!noop) {
		printf("nfs4_vnop_mmap: no open owner\n");
		return (EPERM);
	}

restart:
	/* hold off mount state recovery while we use the open state */
	error = nfs_mount_state_in_use_start(nmp);
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}

	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
		printf("nfs4_vnop_mmap: no open file for owner %d\n", error);
		error = EPERM;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* the open was invalidated by recovery: reopen it and retry */
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		goto restart;
	}
	if (!error)
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nofp = NULL;
		goto out;
	}

	/*
	 * The open reference for mmap must mirror an existing open because
	 * we may need to reclaim it after the file is closed.
	 * So grab another open count matching the accessMode passed in.
	 * If we already had an mmap open, prefer read/write without deny mode.
	 * This means we may have to drop the current mmap open first.
	 */

	/* determine deny mode for open */
	if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
		/* pick the weakest deny mode for which an RW open count exists */
		if (nofp->nof_rw)
			denyMode = NFS_OPEN_SHARE_DENY_NONE;
		else if (nofp->nof_rw_dw)
			denyMode = NFS_OPEN_SHARE_DENY_WRITE;
		else if (nofp->nof_rw_drw)
			denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		else
			error = EPERM;
	} else { /* NFS_OPEN_SHARE_ACCESS_READ */
		/* pick the weakest deny mode for which a read open count exists */
		if (nofp->nof_r)
			denyMode = NFS_OPEN_SHARE_DENY_NONE;
		else if (nofp->nof_r_dw)
			denyMode = NFS_OPEN_SHARE_DENY_WRITE;
		else if (nofp->nof_r_drw)
			denyMode = NFS_OPEN_SHARE_DENY_BOTH;
		else
			error = EPERM;
	}
	if (error) /* mmap mode without proper open mode */
		goto out;

	/*
	 * If the existing mmap access is more than the new access OR the
	 * existing access is the same and the existing deny mode is less,
	 * then we'll stick with the existing mmap open mode.
	 */
	if ((nofp->nof_mmap_access > accessMode) ||
	    ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode)))
		goto out;

	/* update mmap open mode */
	if (nofp->nof_mmap_access) {
		/* drop the previous (weaker) mmap open reference first */
		error = nfs4_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
		if (error) {
			if (!nfs_mount_state_error_should_restart(error))
				printf("nfs_vnop_mmap: close of previous mmap mode failed: %d\n", error);
			const char *vname = vnode_getname(NFSTOV(np));
			printf("nfs_vnop_mmap: update, close error %d, %s\n", error, vname);
			vnode_putname(vname);
			goto out;
		}
		nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
	}

	/* take the additional open count for the mmap reference */
	if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
		if (denyMode == NFS_OPEN_SHARE_DENY_NONE)
			nofp->nof_rw++;
		else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE)
			nofp->nof_rw_dw++;
		else /* NFS_OPEN_SHARE_DENY_BOTH */
			nofp->nof_rw_drw++;
	} else if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
		if (denyMode == NFS_OPEN_SHARE_DENY_NONE)
			nofp->nof_r++;
		else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE)
			nofp->nof_r_dw++;
		else /* NFS_OPEN_SHARE_DENY_BOTH */
			nofp->nof_r_drw++;
	}
	nofp->nof_mmap_access = accessMode;
	nofp->nof_mmap_deny = denyMode;
	nofp->nof_opencnt++;

out:
	if (nofp)
		nfs_open_file_clear_busy(nofp);
	/* if recovery kicked in while we were busy, restart the whole operation */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	return (error);
}
2657
2658
/*
 * NFSv4 mnomap vnode op: the node is no longer mmapped.
 *
 * Walk the node's open files and close out any mmap open references.
 * The node's open lock must be dropped around blocking operations, so
 * the scan restarts from the top of the list after each close.
 */
int
nfs4_vnop_mnomap(
	struct vnop_mnomap_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(vp);
	struct nfsmount *nmp;
	struct nfs_open_file *nofp = NULL;
	int error;

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);

	/* walk all open files and close all mmap opens */
loop:
	error = nfs_mount_state_in_use_start(nmp);
	if (error)
		return (error);
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (!nofp->nof_mmap_access)
			continue;
		/* drop the open lock before doing anything that can block */
		lck_mtx_unlock(&np->n_openlock);
		if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
			/* the open was invalidated by recovery: reopen and rescan */
			nfs_mount_state_in_use_end(nmp, 0);
			nfs4_reopen(nofp, vfs_context_thread(ctx));
			goto loop;
		}
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
		if (error) {
			/* reacquire the lock so the common exit path can unlock it */
			lck_mtx_lock(&np->n_openlock);
			break;
		}
		if (nofp->nof_mmap_access) {
			/* still has an mmap reference (recheck while busy): close it */
			error = nfs4_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
			if (!nfs_mount_state_error_should_restart(error)) {
				if (error) /* not a state-operation-restarting error, so just clear the access */
					printf("nfs_vnop_mnomap: close of mmap mode failed: %d\n", error);
				nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
			}
			if (error) {
				const char *vname = vnode_getname(NFSTOV(np));
				printf("nfs_vnop_mnomap: error %d, %s\n", error, vname);
				vnode_putname(vname);
			}
		}
		nfs_open_file_clear_busy(nofp);
		nfs_mount_state_in_use_end(nmp, error);
		/* the list may have changed while unlocked, so rescan from the start */
		goto loop;
	}
	lck_mtx_unlock(&np->n_openlock);
	nfs_mount_state_in_use_end(nmp, error);
	return (error);
}
2719
2720 /*
2721 * Search a node's lock owner list for the owner for this process.
2722 * If not found and "alloc" is set, then allocate a new one.
2723 */
struct nfs_lock_owner *
nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
{
	pid_t pid = proc_pid(p);
	struct nfs_lock_owner *nlop, *newnlop = NULL;

tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
		if (nlop->nlo_pid != pid)
			continue;
		/* same pid: compare process start times to detect pid reuse */
		if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==))
			break;
		/* stale lock owner... reuse it if we can */
		if (nlop->nlo_refcnt) {
			/* still referenced: unlink it so the last release
			 * destroys it, then rescan the list */
			TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
			nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
			lck_mtx_unlock(&np->n_openlock);
			goto tryagain;
		}
		/* unreferenced: recycle it for the new process instance */
		nlop->nlo_pid_start = p->p_start;
		nlop->nlo_seqid = 0;
		nlop->nlo_stategenid = 0;
		break;
	}

	if (!nlop && !newnlop && alloc) {
		/* none found: allocate one (must drop the lock to do so),
		 * then retry the search in case one was added meanwhile */
		lck_mtx_unlock(&np->n_openlock);
		MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
		if (!newnlop)
			return (NULL);
		bzero(newnlop, sizeof(*newnlop));
		lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnlop->nlo_pid = pid;
		newnlop->nlo_pid_start = p->p_start;
		newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
		TAILQ_INIT(&newnlop->nlo_locks);
		goto tryagain;
	}
	if (!nlop && newnlop) {
		/* no existing owner found on retry: insert the new one */
		newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
		TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
		nlop = newnlop;
	}
	lck_mtx_unlock(&np->n_openlock);

	/* discard our allocation if the retry found an existing owner */
	if (newnlop && (nlop != newnlop))
		nfs_lock_owner_destroy(newnlop);

	/* returned owner (if any) carries a reference for the caller */
	if (nlop)
		nfs_lock_owner_ref(nlop);

	return (nlop);
}
2778
2779 /*
2780 * destroy a lock owner that's no longer needed
2781 */
2782 void
2783 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
2784 {
2785 if (nlop->nlo_open_owner) {
2786 nfs_open_owner_rele(nlop->nlo_open_owner);
2787 nlop->nlo_open_owner = NULL;
2788 }
2789 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
2790 FREE(nlop, M_TEMP);
2791 }
2792
2793 /*
2794 * acquire a reference count on a lock owner
2795 */
2796 void
2797 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
2798 {
2799 lck_mtx_lock(&nlop->nlo_lock);
2800 nlop->nlo_refcnt++;
2801 lck_mtx_unlock(&nlop->nlo_lock);
2802 }
2803
2804 /*
2805 * drop a reference count on a lock owner and destroy it if
2806 * it is no longer referenced and no longer on the mount's list.
2807 */
2808 void
2809 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
2810 {
2811 lck_mtx_lock(&nlop->nlo_lock);
2812 if (nlop->nlo_refcnt < 1)
2813 panic("nfs_lock_owner_rele: no refcnt");
2814 nlop->nlo_refcnt--;
2815 if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
2816 panic("nfs_lock_owner_rele: busy");
2817 /* XXX we may potentially want to clean up idle/unused lock owner structures */
2818 if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
2819 lck_mtx_unlock(&nlop->nlo_lock);
2820 return;
2821 }
2822 /* owner is no longer referenced or linked to mount, so destroy it */
2823 lck_mtx_unlock(&nlop->nlo_lock);
2824 nfs_lock_owner_destroy(nlop);
2825 }
2826
2827 /*
2828 * Mark a lock owner as busy because we are about to
2829 * start an operation that uses and updates lock owner state.
2830 */
2831 int
2832 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
2833 {
2834 struct nfsmount *nmp;
2835 struct timespec ts = {2, 0};
2836 int error = 0, slpflag;
2837
2838 nmp = nlop->nlo_open_owner->noo_mount;
2839 if (!nmp)
2840 return (ENXIO);
2841 slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
2842
2843 lck_mtx_lock(&nlop->nlo_lock);
2844 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
2845 if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
2846 break;
2847 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
2848 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
2849 }
2850 if (!error)
2851 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
2852 lck_mtx_unlock(&nlop->nlo_lock);
2853
2854 return (error);
2855 }
2856
2857 /*
2858 * Clear the busy flag on a lock owner and wake up anyone waiting
2859 * to mark it busy.
2860 */
2861 void
2862 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
2863 {
2864 int wanted;
2865
2866 lck_mtx_lock(&nlop->nlo_lock);
2867 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY))
2868 panic("nfs_lock_owner_clear_busy");
2869 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
2870 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT);
2871 lck_mtx_unlock(&nlop->nlo_lock);
2872 if (wanted)
2873 wakeup(nlop);
2874 }
2875
2876 /*
2877 * Insert a held lock into a lock owner's sorted list.
2878 * (flock locks are always inserted at the head the list)
2879 */
2880 void
2881 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
2882 {
2883 struct nfs_file_lock *nflp;
2884
2885 /* insert new lock in lock owner's held lock list */
2886 lck_mtx_lock(&nlop->nlo_lock);
2887 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
2888 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
2889 } else {
2890 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
2891 if (newnflp->nfl_start < nflp->nfl_start)
2892 break;
2893 }
2894 if (nflp)
2895 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
2896 else
2897 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
2898 }
2899 lck_mtx_unlock(&nlop->nlo_lock);
2900 }
2901
2902 /*
2903 * Get a file lock structure for this lock owner.
2904 */
2905 struct nfs_file_lock *
2906 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
2907 {
2908 struct nfs_file_lock *nflp = NULL;
2909
2910 lck_mtx_lock(&nlop->nlo_lock);
2911 if (!nlop->nlo_alock.nfl_owner) {
2912 nflp = &nlop->nlo_alock;
2913 nflp->nfl_owner = nlop;
2914 }
2915 lck_mtx_unlock(&nlop->nlo_lock);
2916 if (!nflp) {
2917 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
2918 if (!nflp)
2919 return (NULL);
2920 bzero(nflp, sizeof(*nflp));
2921 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
2922 nflp->nfl_owner = nlop;
2923 }
2924 nfs_lock_owner_ref(nlop);
2925 return (nflp);
2926 }
2927
2928 /*
2929 * destroy the given NFS file lock structure
2930 */
2931 void
2932 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
2933 {
2934 struct nfs_lock_owner *nlop = nflp->nfl_owner;
2935
2936 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
2937 nflp->nfl_owner = NULL;
2938 FREE(nflp, M_TEMP);
2939 } else {
2940 lck_mtx_lock(&nlop->nlo_lock);
2941 bzero(nflp, sizeof(nflp));
2942 lck_mtx_unlock(&nlop->nlo_lock);
2943 }
2944 nfs_lock_owner_rele(nlop);
2945 }
2946
2947 /*
2948 * Check if one file lock conflicts with another.
2949 * (nflp1 is the new lock. nflp2 is the existing lock.)
2950 */
2951 int
2952 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
2953 {
2954 /* no conflict if lock is dead */
2955 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD))
2956 return (0);
2957 /* no conflict if it's ours - unless the lock style doesn't match */
2958 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
2959 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
2960 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
2961 (nflp1->nfl_start > nflp2->nfl_start) &&
2962 (nflp1->nfl_end < nflp2->nfl_end))
2963 *willsplit = 1;
2964 return (0);
2965 }
2966 /* no conflict if ranges don't overlap */
2967 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start))
2968 return (0);
2969 /* no conflict if neither lock is exclusive */
2970 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK))
2971 return (0);
2972 /* conflict */
2973 return (1);
2974 }
2975
/*
 * Send an NFSv4 LOCK RPC to the server.
 *
 * Issues a PUTFH + GETATTR + LOCK compound for the byte range described
 * by nflp.  If this lock owner has no state established with the current
 * server instance (its stategenid doesn't match the mount's), the request
 * is built in "new locker" form, which also carries the open owner's
 * seqid and the open stateid so the server can create the lock state.
 * "reclaim" marks the request as a reclaim during server recovery.
 *
 * On success the lock owner's stateid (and, via the seqid-increment
 * helper, the relevant seqids) are updated from the reply.
 */
int
nfs4_lock_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_file_lock *nflp,
	int reclaim,
	thread_t thd,
	kauth_cred_t cred)
{
	struct nfs_lock_owner *nlop = nflp->nfl_owner;
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	uint32_t locktype;
	int error = 0, lockerror = ENOENT, newlocker, numops, status;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

	/* a stale stategenid means the server has no state for this lock owner yet */
	newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
	/* map (read/write, blocking/non-blocking) onto the four NFSv4 lock types */
	locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
			((nflp->nfl_type == F_WRLCK) ?
				NFS_LOCK_TYPE_WRITEW :
				NFS_LOCK_TYPE_READW) :
			((nflp->nfl_type == F_WRLCK) ?
				NFS_LOCK_TYPE_WRITE :
				NFS_LOCK_TYPE_READ);
	if (newlocker) {
		/* creating lock state needs the open file and open owner marked busy */
		error = nfs_open_file_set_busy(nofp, thd);
		if (error)
			return (error);
		error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
		if (error) {
			nfs_open_file_clear_busy(nofp);
			return (error);
		}
		/* tie the lock owner to the open owner (takes a reference) */
		if (!nlop->nlo_open_owner) {
			nfs_open_owner_ref(nofp->nof_owner);
			nlop->nlo_open_owner = nofp->nof_owner;
		}
	}
	error = nfs_lock_owner_set_busy(nlop, thd);
	if (error) {
		if (newlocker) {
			nfs_open_owner_clear_busy(nofp->nof_owner);
			nfs_open_file_clear_busy(nofp);
		}
		return (error);
	}

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "lock", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
	nfsm_chain_add_32(error, &nmreq, locktype);
	nfsm_chain_add_32(error, &nmreq, reclaim);
	nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
	nfsm_chain_add_32(error, &nmreq, newlocker);
	if (newlocker) {
		/* open_to_lock_owner4: open seqid, open stateid, lock seqid, lock owner */
		nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
		nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
		nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	} else {
		/* exist_lock_owner4: current lock stateid + lock seqid */
		nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
		nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* reclaims are tagged R_RECOVER so they may proceed during recovery */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, (reclaim ? R_RECOVER : 0), &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, NULL, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
	/* seqids must advance even on most errors; helper knows the rules */
	nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);

	/* Update the lock owner's stategenid once it appears the server has state for it. */
	/* We determine this by noting the request was successful (we got a stateid). */
	if (newlocker && !error)
		nlop->nlo_stategenid = nmp->nm_stategenid;
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_lock_owner_clear_busy(nlop);
	if (newlocker) {
		nfs_open_owner_clear_busy(nofp->nof_owner);
		nfs_open_file_clear_busy(nofp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3095
/*
 * Send an NFSv4 LOCKU RPC to the server.
 *
 * Issues a PUTFH + GETATTR + LOCKU compound releasing the byte range
 * [start, end] held by the given lock owner.  The lock owner is marked
 * busy around the request, and its stateid/seqid are updated from the
 * reply.
 */
int
nfs4_unlock_rpc(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	int type,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid;
	int error = 0, lockerror = ENOENT, numops, status;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

	error = nfs_lock_owner_set_busy(nlop, vfs_context_thread(ctx));
	if (error)
		return (error);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKU
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "unlock", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
	nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, NULL, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
	/* lock owner's seqid must advance regardless of most outcomes */
	nfs_owner_seqid_increment(NULL, nlop, error);
	nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_lock_owner_clear_busy(nlop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3168
/*
 * Check for any conflicts with the given lock.
 *
 * Checking for a lock doesn't require the file to be opened.
 * So we skip all the open owner, open file, lock owner work
 * and just check for a conflicting lock.
 *
 * First scan this node's local lock list; if a conflict is found
 * there, fill in *fl from the local lock and return without going to
 * the server.  Otherwise send a LOCKT compound; NFSERR_DENIED in the
 * reply carries the conflicting lock's range/type, which is copied
 * into *fl.  A clean reply means no conflict (fl->l_type = F_UNLCK).
 */
int
nfs4_getlock(
	nfsnode_t np,
	struct nfs_lock_owner *nlop,
	struct flock *fl,
	uint64_t start,
	uint64_t end,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp;
	struct nfsm_chain nmreq, nmrep;
	uint64_t xid, val64 = 0;
	uint32_t val = 0;
	int error = 0, lockerror = ENOENT, numops, status;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

	lck_mtx_lock(&np->n_openlock);
	/* scan currently held locks for conflict */
	TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
		if (nflp->nfl_flags & NFS_FILE_LOCK_BLOCKED)
			continue;
		/* overlap + at least one writer = conflict */
		if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
		    ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK)))
			break;
	}
	if (nflp) {
		/* found a conflicting lock */
		fl->l_type = nflp->nfl_type;
		/* flock-style locks have no meaningful pid; report -1 */
		fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
		fl->l_start = nflp->nfl_start;
		fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
		fl->l_whence = SEEK_SET;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (nflp)
		return (0);

	/* no conflict found locally, so ask the server */

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, LOCKT
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "locktest", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
	nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
	nfsm_chain_add_64(error, &nmreq, start);
	nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
	nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, NULL, &xid);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
	if (error == NFSERR_DENIED) {
		/* DENIED reply carries the conflicting lock's description */
		error = 0;
		nfsm_chain_get_64(error, &nmrep, fl->l_start);
		nfsm_chain_get_64(error, &nmrep, val64);
		/* NFSv4 "whole file" length maps to POSIX l_len == 0 */
		fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
		nfsm_chain_get_32(error, &nmrep, val);
		fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
		/* remote holder's pid is unknown */
		fl->l_pid = 0;
		fl->l_whence = SEEK_SET;
	} else if (!error) {
		fl->l_type = F_UNLCK;
	}
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
3274
/*
 * Acquire a file lock for the given range.
 *
 * Add the lock (request) to the lock queue.
 * Scan the lock queue for any conflicting locks.
 * If a conflict is found, block or return an error.
 * Once end of queue is reached, send request to the server.
 * If the server grants the lock, scan the lock queue and
 * update any existing locks. Then (optionally) scan the
 * queue again to coalesce any locks adjacent to the new one.
 *
 * Special case: an exclusive flock-style lock that needs to wait must
 * first release any shared flock-style lock this owner already holds
 * (flock() upgrade semantics), tracked via flocknflp below.
 */
int
nfs4_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = {1, 0};

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	/* interruptible mounts allow signals to break out of sleeps */
	slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp)
		return (ENOLCK);
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW)
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	newnflp->nfl_flags |= style;
	/* marked BLOCKED until the server grants it (or it fails) */
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock. So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK))
			nflp = NULL;
		if (nflp && (nflp->nfl_type != F_RDLCK))
			nflp = NULL;
		flocknflp = nflp;
	}

restart:
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp);
	if (error)
		goto error_out;
	inuse = 1;
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* file needs reopening (post-recovery); do that first */
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		goto restart;
	}

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = TAILQ_NEXT(nflp, nfl_link)) {
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit))
			continue;
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs4_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error)
					error = nfs_mount_state_in_use_start(nmp);
				if (error) {
					/* re-take n_openlock so the loop exit path is uniform */
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL))
					break;
			}
			msleep(nflp, &np->n_openlock, slpflag, "nfs4_setlock_blocked", &ts);
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		/* if we were the last waiter on a dead lock, reap it */
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		if (error || restart)
			break;
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart)
		goto restart;
	if (error)
		goto error_out;

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, ctx)))
		goto error_out;
	busy = 1;
	delay = 0;
	do {
		error = nfs4_lock_rpc(np, nofp, newnflp, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE)))
			break;
		/* request was denied due to either conflict or grace period */
		if ((error != NFSERR_GRACE) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			error2 = nfs4_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2)
				error2 = nfs_mount_state_in_use_start(nmp);
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, ctx);
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/* wait a little bit and send the request again */
		/* (grace period gets the max delay immediately) */
		if (error == NFSERR_GRACE)
			delay = 4;
		if (delay < 4)
			delay++;
		tsleep(newnflp, slpflag, "nfs4_setlock_delay", delay * (hz/2));
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			goto restart;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		/* mark the request dead; waiters (if any) will reap it */
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			nfs_file_lock_destroy(newnflp);
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy)
			nfs_open_state_clear_busy(np);
		if (inuse)
			nfs_mount_state_in_use_end(nmp, error);
		if (nflp2)
			nfs_file_lock_destroy(nflp2);
		return (error);
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp)
			continue;
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
			continue;
		if (nflp->nfl_owner != nlop)
			continue;
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))
			continue;
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start))
			continue;
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/* We're replacing a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			/* Update locks and insert new lock after current lock. */
			nflp2->nfl_flags |= (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK);
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock. Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue. This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
				continue;
			if (nflp->nfl_owner != nlop)
				continue;
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX)
				continue;
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				/* merge into the immediately-preceding adjacent lock */
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				/* merge into the immediately-following adjacent lock */
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD))
				continue;
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(nflp);
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, error);

	if (nflp2)
		nfs_file_lock_destroy(nflp2);
	return (error);
}
3637
/*
 * Release our locks of the given style in the range [start, end].
 *
 * If an existing lock would be split by the unlock, a replacement lock
 * structure is preallocated up front (restarting the operation) so the
 * list update can't fail mid-way.  Unlock RPCs are then sent for the
 * affected ranges (with special handling when flock- and POSIX-style
 * locks coexist for this owner), and the local lock list is clipped,
 * split, or emptied to match.
 */
int
nfs4_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

restart:
	if ((error = nfs_mount_state_in_use_start(nmp)))
		return (error);
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* file needs reopening (post-recovery); do that first */
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		goto restart;
	}
	if ((error = nfs_open_state_set_busy(np, ctx))) {
		nfs_mount_state_in_use_end(nmp, error);
		return (error);
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
				continue;
			if (nflp->nfl_owner != nlop)
				continue;
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
				continue;
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
				continue;
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop everything, allocate, and restart from the top */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp)
				/* NOTE(review): returns ENOMEM here while nfs4_setlock
				 * returns ENOLCK for the same failure — confirm intentional */
				return (ENOMEM);
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us. However, other entries in the
	 * list may be removed. So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK))
		send_unlock_rpcs = 0;
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		/*
		 * Unlocking an flock while POSIX locks are also held:
		 * send unlock RPCs only for the gaps between the held
		 * POSIX ranges, then skip per-lock RPCs below.
		 */
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		while (nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nfs4_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, ctx);
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error)
					goto out;
				s = nflp->nfl_end+1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		/* and the final gap after the last POSIX range */
		lck_mtx_unlock(&np->n_openlock);
		error = nfs4_unlock_rpc(np, nlop, type, s, end, ctx);
		if (nfs_mount_state_error_should_restart(error)) {
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, error);
			goto restart;
		}
		lck_mtx_lock(&np->n_openlock);
		if (error)
			goto out;
		send_unlock_rpcs = 0;
	}

	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))
			continue;
		if (nflp->nfl_owner != nlop)
			continue;
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style)
			continue;
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start))
			continue;
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs) {
				lck_mtx_unlock(&np->n_openlock);
				error = nfs4_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, ctx);
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* re-fetch next; the list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs) {
				lck_mtx_unlock(&np->n_openlock);
				error = nfs4_unlock_rpc(np, nlop, nflp->nfl_type, start, end, ctx);
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error)
				break;
			/* update locks and insert new lock after current lock */
			newnflp->nfl_flags |= (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK);
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs) {
				lck_mtx_unlock(&np->n_openlock);
				error = nfs4_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, ctx);
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs) {
				lck_mtx_unlock(&np->n_openlock);
				error = nfs4_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, ctx);
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error)
				break;
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	/* free the preallocated split lock if it went unused */
	if (newnflp)
		nfs_file_lock_destroy(newnflp);
	return (error);
}
3864
3865 /*
3866 * NFSv4 advisory file locking
3867 */
3868 int
3869 nfs4_vnop_advlock(
3870 struct vnop_advlock_args /* {
3871 struct vnodeop_desc *a_desc;
3872 vnode_t a_vp;
3873 caddr_t a_id;
3874 int a_op;
3875 struct flock *a_fl;
3876 int a_flags;
3877 vfs_context_t a_context;
3878 } */ *ap)
3879 {
3880 vnode_t vp = ap->a_vp;
3881 nfsnode_t np = VTONFS(ap->a_vp);
3882 struct flock *fl = ap->a_fl;
3883 int op = ap->a_op;
3884 int flags = ap->a_flags;
3885 vfs_context_t ctx = ap->a_context;
3886 struct nfsmount *nmp;
3887 struct nfs_vattr nvattr;
3888 struct nfs_open_owner *noop = NULL;
3889 struct nfs_open_file *nofp = NULL;
3890 struct nfs_lock_owner *nlop = NULL;
3891 off_t lstart;
3892 uint64_t start, end;
3893 int error = 0, modified, style;
3894 #define OFF_MAX QUAD_MAX
3895
3896 nmp = VTONMP(ap->a_vp);
3897 if (!nmp)
3898 return (ENXIO);
3899
3900 switch (fl->l_whence) {
3901 case SEEK_SET:
3902 case SEEK_CUR:
3903 /*
3904 * Caller is responsible for adding any necessary offset
3905 * to fl->l_start when SEEK_CUR is used.
3906 */
3907 lstart = fl->l_start;
3908 break;
3909 case SEEK_END:
3910 /* need to flush, and refetch attributes to make */
3911 /* sure we have the correct end of file offset */
3912 if ((error = nfs_node_lock(np)))
3913 return (error);
3914 modified = (np->n_flag & NMODIFIED);
3915 nfs_node_unlock(np);
3916 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1))))
3917 return (error);
3918 if ((error = nfs_getattr(np, &nvattr, ctx, NGA_UNCACHED)))
3919 return (error);
3920 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
3921 if ((np->n_size > OFF_MAX) ||
3922 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start))))
3923 error = EOVERFLOW;
3924 lstart = np->n_size + fl->l_start;
3925 nfs_data_unlock(np);
3926 if (error)
3927 return (error);
3928 break;
3929 default:
3930 return (EINVAL);
3931 }
3932 if (lstart < 0)
3933 return (EINVAL);
3934 start = lstart;
3935 if (fl->l_len == 0) {
3936 end = UINT64_MAX;
3937 } else if (fl->l_len > 0) {
3938 if ((fl->l_len - 1) > (OFF_MAX - lstart))
3939 return (EOVERFLOW);
3940 end = start - 1 + fl->l_len;
3941 } else { /* l_len is negative */
3942 if ((lstart + fl->l_len) < 0)
3943 return (EINVAL);
3944 end = start - 1;
3945 start += fl->l_len;
3946 }
3947 if (error)
3948 return (error);
3949
3950 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
3951 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX)))
3952 return (EINVAL);
3953
3954 /* find the lock owner, alloc if not unlock */
3955 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
3956 if (!nlop) {
3957 error = (op == F_UNLCK) ? 0 : ENOMEM;
3958 if (error)
3959 printf("nfs4_vnop_advlock: no lock owner %d\n", error);
3960 goto out;
3961 }
3962
3963 if (op == F_GETLK) {
3964 error = nfs4_getlock(np, nlop, fl, start, end, ctx);
3965 } else {
3966 /* find the open owner */
3967 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
3968 if (!noop) {
3969 printf("nfs4_vnop_advlock: no open owner\n");
3970 error = EPERM;
3971 goto out;
3972 }
3973 /* find the open file */
3974 restart:
3975 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
3976 if (error)
3977 error = EBADF;
3978 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
3979 printf("nfs_vnop_advlock: LOST\n");
3980 error = EIO;
3981 }
3982 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
3983 nfs4_reopen(nofp, vfs_context_thread(ctx));
3984 nofp = NULL;
3985 goto restart;
3986 }
3987 if (error) {
3988 printf("nfs4_vnop_advlock: no open file %d\n", error);
3989 goto out;
3990 }
3991 if (op == F_UNLCK) {
3992 error = nfs4_unlock(np, nofp, nlop, start, end, style, ctx);
3993 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
3994 if ((op == F_SETLK) && (flags & F_WAIT))
3995 op = F_SETLKW;
3996 error = nfs4_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
3997 } else {
3998 /* not getlk, unlock or lock? */
3999 error = EINVAL;
4000 }
4001 }
4002
4003 out:
4004 if (nlop)
4005 nfs_lock_owner_rele(nlop);
4006 if (noop)
4007 nfs_open_owner_rele(noop);
4008 return (error);
4009 }
4010
4011 /*
4012 * Check if an open owner holds any locks on a file.
4013 */
4014 int
4015 nfs4_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4016 {
4017 struct nfs_lock_owner *nlop;
4018
4019 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4020 if (nlop->nlo_open_owner != noop)
4021 continue;
4022 if (!TAILQ_EMPTY(&nlop->nlo_locks))
4023 break;
4024 }
4025 return (nlop ? 1 : 0);
4026 }
4027
/*
 * Reopen simple (no deny, no locks) open state that was lost.
 *
 * Only one thread performs the reopen at a time; others wait on the
 * NFS_OPEN_FILE_REOPENING flag.  On failure the open file is marked
 * NFS_OPEN_FILE_LOST.
 */
void
nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
	vnode_t vp = NFSTOV(nofp->nof_np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	/* NOTE(review): nmp is dereferenced here without a NULL check; verify callers guarantee a live mount. */
	int error = 0, done = 0, slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0;
	struct timespec ts = { 1, 0 };

	/* serialize: wait while another thread is already reopening */
	lck_mtx_lock(&nofp->nof_lock);
	while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
		if ((error = nfs_sigintr(nmp, NULL, thd, 0)))
			break;
		msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts);
	}
	if (!(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* reopen no longer needed (someone else did it, or it was cleared) */
		lck_mtx_unlock(&nofp->nof_lock);
		return;
	}
	nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
	lck_mtx_unlock(&nofp->nof_lock);

	/* the OPEN RPC needs the parent directory and the file's name */
	dvp = vnode_getparent(vp);
	vname = vnode_getname(vp);
	if (!dvp || !vname) {
		error = EIO;
		goto out;
	}
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", vname);
	if (namelen >= sizeof(smallname)) {
		/* name didn't fit the stack buffer; allocate one big enough */
		namelen++; /* snprintf result doesn't include '\0' */
		MALLOC(filename, char *, namelen, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen, "%s", vname);
	}
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

restart:
	done = 0;
	if ((error = nfs_mount_state_in_use_start(nmp)))
		goto out;

	/* reestablish each share-access mode the file previously had open */
	if (nofp->nof_rw)
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	if (!error && nofp->nof_w)
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
	if (!error && nofp->nof_r)
		error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);

	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* server still in its grace period: retry the whole sequence */
		if (error == NFSERR_GRACE)
			goto restart;
		error = 0;
		goto out;
	}
	done = 1;
out:
	lck_mtx_lock(&nofp->nof_lock);
	nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
	if (error)
		nofp->nof_flags |= NFS_OPEN_FILE_LOST;
	if (done)
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
	else
		printf("nfs4_reopen: failed, error %d, lost %d\n", error, (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0);
	lck_mtx_unlock(&nofp->nof_lock);
	if (filename && (filename != &smallname[0]))
		FREE(filename, M_TEMP);
	if (vname)
		vnode_putname(vname);
	if (dvp != NULLVP)
		vnode_put(dvp);
}
4116
4117 /*
4118 * Send a normal OPEN RPC to open/create a file.
4119 */
4120 int
4121 nfs4_open_rpc(
4122 struct nfs_open_file *nofp,
4123 vfs_context_t ctx,
4124 struct componentname *cnp,
4125 struct vnode_attr *vap,
4126 vnode_t dvp,
4127 vnode_t *vpp,
4128 int create,
4129 int share_access,
4130 int share_deny)
4131 {
4132 return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4133 cnp, vap, dvp, vpp, create, share_access, share_deny));
4134 }
4135
4136 /*
4137 * Send an OPEN RPC to reopen a file.
4138 */
4139 int
4140 nfs4_open_reopen_rpc(
4141 struct nfs_open_file *nofp,
4142 thread_t thd,
4143 kauth_cred_t cred,
4144 struct componentname *cnp,
4145 vnode_t dvp,
4146 vnode_t *vpp,
4147 int share_access,
4148 int share_deny)
4149 {
4150 return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, 0, share_access, share_deny));
4151 }
4152
/*
 * common OPEN RPC code
 *
 * If create is set, ctx must be passed in.
 *
 * Builds and sends a compound OPEN request (optionally creating the file),
 * handles OPEN_CONFIRM if the server requests it, instantiates a new vnode
 * for creates, and records/returns any delegation granted by the server.
 */
int
nfs4_open_rpc_internal(
	struct nfs_open_file *nofp,
	vfs_context_t ctx,
	thread_t thd,
	kauth_cred_t cred,
	struct componentname *cnp,
	struct vnode_attr *vap,
	vnode_t dvp,
	vnode_t *vpp,
	int create,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr, dnvattr;
	int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, numops, exclusive = 0, gotuid, gotgid;
	u_int64_t xid, savedxid = 0;
	nfsnode_t dnp = VTONFS(dvp);
	nfsnode_t np, newnp = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags, delegation = 0, recall = 0, val;
	struct nfs_stateid stateid, dstateid, *sid;
	fhandle_t fh;
	struct nfsreq *req = NULL;
	struct nfs_dulookup dul;

	if (create && !ctx)
		return (EINVAL);

	nmp = VTONMP(dvp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	np = *vpp ? VTONFS(*vpp) : NULL;
	if (create && vap) {
		exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
		nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
		gotuid = VATTR_IS_ACTIVE(vap, va_uid);
		gotgid = VATTR_IS_ACTIVE(vap, va_gid);
	} else {
		exclusive = gotuid = gotgid = 0;
	}
	/* use the open file's stateid if we have one, else a zeroed stateid */
	if (nofp) {
		sid = &nofp->nof_stateid;
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
		sid = &stateid;
	}

	if ((error = nfs_open_owner_set_busy(noop, thd)))
		return (error);
again:
	rflags = 0;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* build the compound request; statement order defines the wire format */
	// PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);

	// open owner: clientid + uid
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner

	// openflag4
	nfsm_chain_add_32(error, &nmreq, create);
	if (create) {
		if (exclusive) {
			static uint32_t create_verf; // XXX need a better verifier
			create_verf++;
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
			/* insert 64 bit verifier */
			nfsm_chain_add_32(error, &nmreq, create_verf);
			nfsm_chain_add_32(error, &nmreq, create_verf);
		} else {
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
			nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
		}
	}

	// open_claim4
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
	nfsm_chain_add_string(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* also request the filehandle so we can instantiate/verify the node */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	if (!error)
		error = busyerror = nfs_node_set_busy(dnp, thd);
	nfsmout_if(error);

	/* for creates, start a dummy lookup to keep the name cache warm */
	if (create)
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);

	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, NULL, &req);
	if (!error) {
		if (create)
			nfs_dulookup_start(&dul, dnp, ctx);
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
		savedxid = xid;
	}

	if (create)
		nfs_dulookup_finish(&dul, dnp, ctx);

	/* parse the reply, mirroring the request's operation order */
	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error)
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			break;
		case NFS_OPEN_DELEGATE_READ:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			// ACE: (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			nfsm_chain_get_32(error, &nmrep, val); /* string length */
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(val));
			break;
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			// space (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			// ACE: (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			nfsm_chain_get_32(error, &nmrep, val); /* string length */
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(val));
			break;
		default:
			error = EBADRPC;
			break;
		}
	/* At this point if we have no error, the object was created/opened. */
	/* if we don't get attributes, then we should lookitup. */
	open_error = error;
	nfsmout_if(error);
	if (create && !exclusive)
		nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open/create didn't return filehandle?\n");
		error = EBADRPC;
		goto nfsmout;
	}
	if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX for the open case, what if fh doesn't match the vnode we think we're opening?
		printf("nfs4_open_rpc: warning: file handle mismatch\n");
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
	if (error)
		NATTRINVALIDATE(dnp);
	nfsmout_if(error);

	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;

	/* server may require a separate OPEN_CONFIRM round trip */
	if (rflags & NFS_OPEN_RESULT_CONFIRM) {
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		nfsm_chain_cleanup(&nmreq);
		nfsm_chain_cleanup(&nmrep);
		// PUTFH, OPEN_CONFIRM, GETATTR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, fh.fh_data, fh.fh_len);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
		nfsm_chain_add_stateid(error, &nmreq, sid);
		nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
			NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfsmout_if(error);
		error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, 0, &nmrep, &xid, &status);

		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsmout_if(error);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
		nfs_owner_seqid_increment(noop, NULL, error);
		nfsm_chain_get_stateid(error, &nmrep, sid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsmout_if(error);
		NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
		error = nfs4_parsefattr(&nmrep, NULL, &nvattr, NULL, NULL);
		nfsmout_if(error);
		savedxid = xid;
		if ((lockerror = nfs_node_lock(dnp)))
			error = lockerror;
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror && create) {
		/* a successful create invalidates any cached negative entries */
		if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
			dnp->n_flag &= ~NNEGNCENTRIES;
			cache_purge_negatives(dvp);
		}
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		nfs_getattr(dnp, &dnvattr, ctx, NGA_CACHED);
	}
	if (!lockerror)
		nfs_node_unlock(dnp);
	if (!error && create && fh.fh_len) {
		/* create the vnode with the filehandle and attributes */
		xid = savedxid;
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, NG_MAKEENTRY, &newnp);
		if (!error)
			newvp = NFSTOV(newnp);
	}
	if (!busyerror)
		nfs_node_clear_busy(dnp);
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (!np)
			np = newnp;
		if (!error && np && !recall) {
			/* stuff the delegation state in the node */
			lck_mtx_lock(&np->n_openlock);
			np->n_openflags &= ~N_DELEG_MASK;
			np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
			np->n_dstateid = dstateid;
			lck_mtx_unlock(&np->n_openlock);
		}
		if (recall) {
			/* server asked for the delegation back immediately */
			nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, thd, cred);
			if (np) {
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				lck_mtx_unlock(&np->n_openlock);
			}
		}
	}
	if (error) {
		/* fall back to unchecked create if server lacks exclusive create */
		if (exclusive && (error == NFSERR_NOTSUPP)) {
			exclusive = 0;
			goto again;
		}
		if (newvp) {
			nfs_node_unlock(newnp);
			vnode_put(newvp);
		}
	} else if (create) {
		nfs_node_unlock(newnp);
		if (exclusive) {
			/* exclusive create ignored the attrs; set them now */
			error = nfs4_setattr_rpc(newnp, vap, ctx);
			if (error && (gotuid || gotgid)) {
				/* it's possible the server didn't like our attempt to set IDs. */
				/* so, let's try it again without those */
				VATTR_CLEAR_ACTIVE(vap, va_uid);
				VATTR_CLEAR_ACTIVE(vap, va_gid);
				error = nfs4_setattr_rpc(newnp, vap, ctx);
			}
		}
		if (error)
			vnode_put(newvp);
		else
			*vpp = newvp;
	}
	nfs_open_owner_clear_busy(noop);
	return (error);
}
4480
/*
 * Send an OPEN RPC to reclaim an open file.
 *
 * Used during state recovery (CLAIM_PREVIOUS): re-establishes the open
 * state (and any delegation previously held) after a server reboot.
 * Sent with R_RECOVER so it bypasses normal recovery gating.
 */
int
nfs4_open_reclaim_rpc(
	struct nfs_open_file *nofp,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, lockerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid;
	nfsnode_t np = nofp->nof_np;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags = 0, delegation, recall = 0, val;
	fhandle_t fh;
	struct nfs_stateid dstateid;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	if ((error = nfs_open_owner_set_busy(noop, current_thread())))
		return (error);

	delegation = NFS_OPEN_DELEGATE_NONE;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN, GETATTR(FH)
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	// open owner: clientid + uid
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
	// openflag4
	nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
	// open_claim4
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
	/* tell the server what kind of delegation we previously held */
	delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
			(np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
			NFS_OPEN_DELEGATE_NONE;
	nfsm_chain_add_32(error, &nmreq, delegation);
	delegation = NFS_OPEN_DELEGATE_NONE;
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* also request the filehandle so we can verify it's the same file */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(), noop->noo_cred, R_RECOVER, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_check_change_info(error, &nmrep, np);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error)
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			break;
		case NFS_OPEN_DELEGATE_READ:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			// ACE: (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			nfsm_chain_get_32(error, &nmrep, val); /* string length */
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(val));
			if (!error) {
				/* stuff the delegation state in the node */
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				np->n_openflags |= N_DELEG_READ;
				np->n_dstateid = dstateid;
				lck_mtx_unlock(&np->n_openlock);
			}
			break;
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			// space (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			// ACE: (skip) XXX
			nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			nfsm_chain_get_32(error, &nmrep, val); /* string length */
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(val));
			if (!error) {
				/* stuff the delegation state in the node */
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				np->n_openflags |= N_DELEG_WRITE;
				np->n_dstateid = dstateid;
				lck_mtx_unlock(&np->n_openlock);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open reclaim didn't return filehandle?\n");
		error = EBADRPC;
		goto nfsmout;
	}
	if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX what if fh doesn't match the vnode we think we're re-opening?
		printf("nfs4_open_reclaim_rpc: warning: file handle mismatch\n");
	}
	error = nfs_loadattrcache(np, &nvattr, &xid, 1);
	nfsmout_if(error);
	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX)
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	/* if the reclaimed delegation was immediately recalled, return it now */
	/* NOTE(review): n_openlock is taken here after the node lock/busy state is dropped — verify this ordering is safe. */
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (recall) {
			nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, current_thread(), noop->noo_cred);
			lck_mtx_lock(&np->n_openlock);
			np->n_openflags &= ~N_DELEG_MASK;
			lck_mtx_unlock(&np->n_openlock);
		}
	}
	return (error);
}
4643
/*
 * Send an OPEN_DOWNGRADE RPC to reduce the share access/deny modes of an
 * open file to the values currently recorded in the open file structure.
 * Updates the open stateid from the reply.
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/* serialize seqid-bearing operations on this open owner */
	if ((error = nfs_open_owner_set_busy(noop, vfs_context_thread(ctx))))
		return (error);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
4708
/*
 * Send a CLOSE RPC for an open file, releasing its open state on the
 * server.  The stateid in the open file structure is updated from the
 * reply.  "flag" is passed through to nfs_request2() (e.g. R_RECOVER).
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flag)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;

	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/* serialize seqid-bearing operations on this open owner */
	if ((error = nfs_open_owner_set_busy(noop, thd)))
		return (error);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, flag, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
nfsmout:
	if (!lockerror)
		nfs_node_unlock(np);
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
4773
4774
/*
 * Send a DELEGRETURN RPC to hand a delegation (identified by filehandle
 * and delegation stateid) back to the server.  Sent with R_RECOVER so it
 * can proceed during state recovery.
 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	uint64_t xid;
	struct nfsm_chain nmreq, nmrep;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, R_RECOVER, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return (error);
}
4808
4809
4810 /*
4811 * NFSv4 read call.
4812 * Just call nfs_bioread() to do the work.
4813 *
4814 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
4815 * without first calling VNOP_OPEN, so we make sure the file is open here.
4816 */
int
nfs4_vnop_read(
	struct vnop_read_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		struct uio *a_uio;
		int a_ioflag;
		vfs_context_t a_context;
	} */ *ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	/* reads are only supported on regular files */
	if (vnode_vtype(ap->a_vp) != VREG)
		return (EPERM);

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (!nmp)
		return (ENXIO);

	/* find (or create) the open owner for this credential; holds a reference */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		/* the open state for this file was lost (recovery failed) */
		printf("nfs_vnop_read: LOST\n");
		error = EIO;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* file needs to be reopened (e.g. after server state was lost) */
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		goto restart;
	}
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}
	if (!nofp->nof_access) {
		/* we don't have the file open, so open it for read access */
		error = nfs_mount_state_in_use_start(nmp);
		if (error) {
			nfs_open_owner_rele(noop);
			return (error);
		}
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
		if (error)
			nofp = NULL;	/* don't clear busy below if we never set it */
		if (!error)
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!error)
			/* remember to close this open when the file is done with */
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		if (nofp)
			nfs_open_file_clear_busy(nofp);
		/* if the mount state changed underneath us, redo the whole open dance */
		if (nfs_mount_state_in_use_end(nmp, error)) {
			nofp = NULL;
			goto restart;
		}
	}
	nfs_open_owner_rele(noop);
	if (error)
		return (error);
	/* the actual read is handled by the common buffer-cache read path */
	return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context));
}
4887
4888 /*
4889 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
4890 * Files are created using the NFSv4 OPEN RPC. So we must open the
4891 * file to create it and then close it.
4892 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_dvp;
		vnode_t *a_vpp;
		struct componentname *a_cnp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;

	nmp = VTONMP(dvp);
	if (!nmp)
		return (ENXIO);

	/* drop uid/gid attributes the server would set identically anyway */
	nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);

	/* regular files are created via OPEN, which needs an open owner */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop)
		return (ENOMEM);

restart:
	error = nfs_mount_state_in_use_start(nmp);
	if (error) {
		nfs_open_owner_rele(noop);
		return (error);
	}

	/* no nfsnode yet, so look up the open file by owner alone */
	error = nfs_open_file_find(NULL, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		goto restart;
	}
	if (!error)
		error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nofp = NULL;	/* not busied, so don't clear busy in out: */
		goto out;
	}

	/* optimistically record the open; rolled back below on failure */
	nofp->nof_opencnt++;
	nofp->nof_access = NFS_OPEN_SHARE_ACCESS_BOTH;
	nofp->nof_deny = NFS_OPEN_SHARE_DENY_NONE;
	nofp->nof_rw++;

	error = nfs4_open_rpc(nofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE,
			NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* undo the optimistic open accounting from above */
		nofp->nof_rw--;
		nofp->nof_access = 0;
		nofp->nof_deny = 0;
		nofp->nof_opencnt--;
	}
	if (*vpp) {
		nofp->nof_np = np = VTONFS(*vpp);
		/* insert nofp onto np's open list */
		TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
		if (!error) {
			/* mark as created by us so a close by the creator can clean up */
			nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
			nofp->nof_creator = current_thread();
		}
	}
out:
	if (nofp)
		nfs_open_file_clear_busy(nofp);
	if (nfs_mount_state_in_use_end(nmp, error)) {
		/* mount state changed (recovery) - retry the whole create */
		nofp = NULL;
		goto restart;
	}
	if (noop)
		nfs_open_owner_rele(noop);
	return (error);
}
4988
4989 void
4990 nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
4991 {
4992 /*
4993 * Don't bother setting UID if it's the same as the credential performing the create.
4994 * Don't bother setting GID if it's the same as the directory or credential.
4995 */
4996 if (VATTR_IS_ACTIVE(vap, va_uid)) {
4997 if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid)
4998 VATTR_CLEAR_ACTIVE(vap, va_uid);
4999 }
5000 if (VATTR_IS_ACTIVE(vap, va_gid)) {
5001 if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
5002 (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid))
5003 VATTR_CLEAR_ACTIVE(vap, va_gid);
5004 }
5005 }
5006
5007 /*
5008 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
5009 */
int
nfs4_create_rpc(
	vfs_context_t ctx,
	nfsnode_t dnp,
	struct componentname *cnp,
	struct vnode_attr *vap,
	int type,
	char *link,
	nfsnode_t *npp)
{
	struct nfsmount *nmp;
	struct nfs_vattr nvattr, dnvattr;
	/* sentinel values: create_error EIO until CREATE succeeds,
	 * lockerror/busyerror nonzero until the lock/busy is actually taken */
	int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid, savedxid = 0;
	nfsnode_t np = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	const char *tag;
	nfs_specdata sd;
	fhandle_t fh;
	struct nfsreq *req = NULL;
	struct nfs_dulookup dul;

	nmp = NFSTONMP(dnp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	sd.specdata1 = sd.specdata2 = 0;

	/* pick the compound tag and validate type-specific arguments */
	switch (type) {
	case NFLNK:
		tag = "symlink";
		break;
	case NFBLK:
	case NFCHR:
		tag = "mknod";
		if (!VATTR_IS_ACTIVE(vap, va_rdev))
			return (EINVAL);
		sd.specdata1 = major(vap->va_rdev);
		sd.specdata2 = minor(vap->va_rdev);
		break;
	case NFSOCK:
	case NFFIFO:
		tag = "mknod";
		break;
	case NFDIR:
		tag = "mkdir";
		break;
	default:
		return (EINVAL);
	}

	nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);

	error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
	/* set up a delayed-unlock lookup to run concurrently with the RPC */
	nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, tag, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
	nfsm_chain_add_32(error, &nmreq, type);
	/* CREATE's createtype4 union: link text for symlinks, devdata for devices */
	if (type == NFLNK) {
		nfsm_chain_add_string(error, &nmreq, link, strlen(link));
	} else if ((type == NFBLK) || (type == NFCHR)) {
		nfsm_chain_add_32(error, &nmreq, sd.specdata1);
		nfsm_chain_add_32(error, &nmreq, sd.specdata2);
	}
	nfsm_chain_add_string(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* request the new object's attributes plus its filehandle */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	/* send async so the dulookup can proceed while we wait for the reply */
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
			vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req);
	if (!error) {
		nfs_dulookup_start(&dul, dnp, ctx);
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	}

	if ((lockerror = nfs_node_lock(dnp)))
		error = lockerror;
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	/* At this point if we have no error, the object was created. */
	/* if we don't get attributes, then we should lookitup. */
	create_error = error;
	nfsmout_if(error);
	nfs_vattr_set_supported(bitmap, vap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	NFS_CLEAR_ATTRIBUTES(nvattr.nva_bitmap);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: create/%s didn't return filehandle?\n", tag);
		error = EBADRPC;
		goto nfsmout;
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* loadattr advances xid, so save it for the new node's attrs below */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, NULL, &xid);
	if (error)
		NATTRINVALIDATE(dnp);

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
			/* something was created, so drop cached negative lookups */
			dnp->n_flag &= ~NNEGNCENTRIES;
			cache_purge_negatives(NFSTOV(dnp));
		}
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
		/* nfs_getattr() will check changed and purge caches */
		nfs_getattr(dnp, &dnvattr, ctx, NGA_CACHED);
	}

	if (!error && fh.fh_len) {
		/* create the vnode with the filehandle and attributes */
		xid = savedxid;
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, NG_MAKEENTRY, &np);
		if (!error)
			newvp = NFSTOV(np);
	}

	nfs_dulookup_finish(&dul, dnp, ctx);

	/*
	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
	 * if we can succeed in looking up the object.
	 */
	if ((create_error == EEXIST) || (!create_error && !newvp)) {
		error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error) {
			newvp = NFSTOV(np);
			/* NOTE(review): the VLNK check appears symlink-specific but runs
			 * for every create type here - confirm it's intended for all */
			if (vnode_vtype(newvp) != VLNK)
				error = EEXIST;
		}
	}
	if (!busyerror)
		nfs_node_clear_busy(dnp);
	if (error) {
		if (newvp) {
			nfs_node_unlock(np);
			vnode_put(newvp);
		}
	} else {
		/* return the new node locked->unlocked to the caller */
		nfs_node_unlock(np);
		*npp = np;
	}
	return (error);
}
5200
5201 int
5202 nfs4_vnop_mknod(
5203 struct vnop_mknod_args /* {
5204 struct vnodeop_desc *a_desc;
5205 vnode_t a_dvp;
5206 vnode_t *a_vpp;
5207 struct componentname *a_cnp;
5208 struct vnode_attr *a_vap;
5209 vfs_context_t a_context;
5210 } */ *ap)
5211 {
5212 nfsnode_t np = NULL;
5213 struct nfsmount *nmp;
5214 int error;
5215
5216 nmp = VTONMP(ap->a_dvp);
5217 if (!nmp)
5218 return (ENXIO);
5219
5220 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type))
5221 return (EINVAL);
5222 switch (ap->a_vap->va_type) {
5223 case VBLK:
5224 case VCHR:
5225 case VFIFO:
5226 case VSOCK:
5227 break;
5228 default:
5229 return (ENOTSUP);
5230 }
5231
5232 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
5233 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
5234 if (!error)
5235 *ap->a_vpp = NFSTOV(np);
5236 return (error);
5237 }
5238
5239 int
5240 nfs4_vnop_mkdir(
5241 struct vnop_mkdir_args /* {
5242 struct vnodeop_desc *a_desc;
5243 vnode_t a_dvp;
5244 vnode_t *a_vpp;
5245 struct componentname *a_cnp;
5246 struct vnode_attr *a_vap;
5247 vfs_context_t a_context;
5248 } */ *ap)
5249 {
5250 nfsnode_t np = NULL;
5251 int error;
5252
5253 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
5254 NFDIR, NULL, &np);
5255 if (!error)
5256 *ap->a_vpp = NFSTOV(np);
5257 return (error);
5258 }
5259
5260 int
5261 nfs4_vnop_symlink(
5262 struct vnop_symlink_args /* {
5263 struct vnodeop_desc *a_desc;
5264 vnode_t a_dvp;
5265 vnode_t *a_vpp;
5266 struct componentname *a_cnp;
5267 struct vnode_attr *a_vap;
5268 char *a_target;
5269 vfs_context_t a_context;
5270 } */ *ap)
5271 {
5272 nfsnode_t np = NULL;
5273 int error;
5274
5275 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
5276 NFLNK, ap->a_target, &np);
5277 if (!error)
5278 *ap->a_vpp = NFSTOV(np);
5279 return (error);
5280 }
5281
/*
 * NFSv4 hard link.
 *
 * Compound: PUTFH(source), SAVEFH, PUTFH(dir), LINK, GETATTR(dir),
 *           RESTOREFH, GETATTR(source)
 */
int
nfs4_vnop_link(
	struct vnop_link_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_vp;
		vnode_t a_tdvp;
		struct componentname *a_cnp;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;

	/* hard links can't cross mounts */
	if (vnode_mount(vp) != vnode_mount(tdvp))
		return (EXDEV);

	nmp = VTONMP(vp);
	if (!nmp)
		return (ENXIO);
	nfsvers = nmp->nm_vers;

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	/* busy both the target directory and the source node */
	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx))))
		return (error);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_string(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
		NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(tdnp, np))) {
		error = lockerror;
		goto nfsmout;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* loadattr advances xid; save it so the second loadattr starts fresh */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, NULL, &xid);
	if (error)
		NATTRINVALIDATE(tdnp);
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, NULL, &xid);
	if (error)
		NATTRINVALIDATE(np);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror)
		tdnp->n_flag |= NMODIFIED;
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST)
		error = 0;
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		/* the directory gained an entry; drop cached negative lookups */
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);
	}
	if (!lockerror)
		nfs_node_unlock2(tdnp, np);
	nfs_node_clear_busy2(tdnp, np);
	return (error);
}
5396
/*
 * NFSv4 rmdir: remove a directory via the common NFSv4 REMOVE RPC,
 * purging name caches and unhashing the nfsnode on success.
 */
int
nfs4_vnop_rmdir(
	struct vnop_rmdir_args /* {
		struct vnodeop_desc *a_desc;
		vnode_t a_dvp;
		vnode_t a_vp;
		struct componentname *a_cnp;
		vfs_context_t a_context;
	} */ *ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t dnp = VTONFS(dvp);
	struct nfs_vattr dnvattr;
	struct nfs_dulookup dul;

	if (vnode_vtype(vp) != VDIR)
		return (EINVAL);

	/* busy both the parent directory and the victim */
	if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx))))
		return (error);

	/* kick off a delayed-unlock lookup to overlap with the RPC */
	nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
	nfs_dulookup_start(&dul, dnp, ctx);

	error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
			vfs_context_thread(ctx), vfs_context_ucred(ctx));

	nfs_name_cache_purge(dnp, np, cnp, ctx);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, &dnvattr, ctx, NGA_CACHED);
	nfs_dulookup_finish(&dul, dnp, ctx);
	nfs_node_clear_busy2(dnp, np);

	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	if (!error) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		lck_mtx_lock(nfs_node_hash_mutex);
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
	}
	return (error);
}
5456