/*
 * Copyright (c) 2006-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vnode op calls for NFS version 4
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/conf.h>
#include <sys/vnode_internal.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/ubc_internal.h>
#include <sys/attr.h>
#include <sys/signalvar.h>
#include <sys/uio_internal.h>
#include <sys/xattr.h>
#include <sys/paths.h>

#include <vfs/vfs_support.h>

#include <sys/vm.h>

#include <sys/time.h>
#include <kern/clock.h>
#include <libkern/OSAtomic.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <vm/vm_kern.h>

#include <kern/task.h>
#include <kern/sched_prim.h>

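/*
 * Note on the nfsm_chain_*() macros used throughout this file: they thread
 * the local "error" variable through every build/parse step and become
 * no-ops once "error" is set, so long request/reply sequences can run
 * without a check after each call; nfsmout_if(error) then bails out to
 * the nfsmout: label at the points where it matters.
 */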
int
nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, numops, slot;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct timeval now;
	uint32_t access_result = 0, supported = 0, missing;
	struct nfsmount *nmp = NFSTONMP(np);
	int nfsvers = nmp->nm_vers;
	uid_t uid;
	struct nfsreq_secinfo_args si;

	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, ACCESS, GETATTR
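	/*
	 * The trailing GETATTR lets the compound piggyback a fresh set of
	 * attributes on the ACCESS reply, so the node's attribute cache can
	 * be updated from the same round trip.
	 */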
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
	nfsm_chain_add_32(error, &nmreq, *access);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, rpcflags, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
	nfsm_chain_get_32(error, &nmrep, supported);
	nfsm_chain_get_32(error, &nmrep, access_result);
	nfsmout_if(error);
	if ((missing = (*access & ~supported))) {
		/* missing support for something(s) we wanted */
		if (missing & NFS_ACCESS_DELETE) {
			/*
			 * If the server doesn't report DELETE (possible
			 * on UNIX systems), we'll assume that it is OK
			 * and just let any subsequent delete action fail
			 * if it really isn't deletable.
			 */
			access_result |= NFS_ACCESS_DELETE;
		}
	}
	/* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
	if (nfs_access_dotzfs) {
		vnode_t dvp = NULLVP;
		if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
			access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
		} else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
			access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
		}
		if (dvp != NULLVP) {
			vnode_put(dvp);
		}
	}
	/* Some servers report DELETE support but erroneously give a denied answer. */
	if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
		access_result |= NFS_ACCESS_DELETE;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	nfsmout_if(error);

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	nfsmout_if(error);

	if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
		uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
	} else {
		uid = kauth_cred_getuid(vfs_context_ucred(ctx));
	}
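	/* Cache the result in one of the node's per-uid access cache slots. */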
	slot = nfs_node_access_slot(np, uid, 1);
	np->n_accessuid[slot] = uid;
	microuptime(&now);
	np->n_accessstamp[slot] = now.tv_sec;
	np->n_access[slot] = access_result;

	/* pass back the access returned with this request */
	*access = np->n_access[slot];
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
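	/* Clamp the link length so the string (plus terminating NUL) fits in the caller's buffer. */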
	if (len >= *buflenp) {
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
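	/* A zero-length reply without the eof flag is treated as EOF anyway,
	 * so a caller can't keep re-issuing reads that return nothing. */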
	if (eofp) {
		if (!eof && !retlen) {
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
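	/*
	 * Record the server's write verifier. A verifier change means the
	 * server rebooted (or otherwise lost state) and any uncommitted
	 * writes may need to be re-sent.
	 */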
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		microuptime(&np->n_lastio);
	}
	return error;
}

int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
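	/* The server is in its reboot-recovery grace period; wait a couple of seconds and retry. */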
	if (error == NFSERR_GRACE) {
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}

int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}

/*
 * NFS V4 readdir RPC.
 */
int
nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
	int i, status, more_entries = 1, eof, bp_dropped = 0;
	uint32_t nmreaddirsize, nmrsize;
	uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
	uint64_t cookie, lastcookie, xid, savedxid;
	struct nfsm_chain nmreq, nmrep, nmrepsave;
	fhandle_t fh;
	struct nfs_vattr nvattr, *nvattrp;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;
	char *padstart, padlen;
	const char *tag;
	uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
	struct timeval now;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	nmreaddirsize = nmp->nm_readdirsize;
	nmrsize = nmp->nm_rsize;
	bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
	namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
	rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);

	/*
	 * Set up attribute request for entries.
	 * For READDIRPLUS functionality, get everything.
	 * Otherwise, just get what we need for struct direntry.
	 */
	if (rdirplus) {
		tag = "readdirplus";
		NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
	} else {
		tag = "readdir";
		NFS_CLEAR_ATTRIBUTES(entry_attrs);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
		NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
	}
	NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);

	/* lock to protect access to cookie verifier */
	if ((lockerror = nfs_node_lock(dnp))) {
		return lockerror;
	}

	/* determine cookie to use, and move dp to the right offset */
	ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
	dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
	if (ndbhp->ndbh_count) {
		for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
			dp = NFS_DIRENTRY_NEXT(dp);
		}
		cookie = dp->d_seekoff;
		dp = NFS_DIRENTRY_NEXT(dp);
	} else {
		cookie = bp->nb_lblkno;
		/* increment with every buffer read */
		OSAddAtomic64(1, &nfsstats.readdir_bios);
	}
	lastcookie = cookie;

	/*
	 * The NFS client is responsible for the "." and ".." entries in the
	 * directory. So, we put them at the start of the first buffer.
	 * Don't bother for attribute directories.
	 */
	if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
	    !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
		fh.fh_len = 0;
		fhlen = rdirplus ? fh.fh_len + 1 : 0;
		xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
		/* "." */
		namlen = 1;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, ".", namlen + 1);
		dp->d_fileno = dnp->n_vattr.nva_fileid;
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 1;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
		}

		/* ".." */
		namlen = 2;
		reclen = NFS_DIRENTRY_LEN(namlen + xlen);
		if (xlen) {
			bzero(&dp->d_name[namlen + 1], xlen);
		}
		dp->d_namlen = namlen;
		strlcpy(dp->d_name, "..", namlen + 1);
		if (dnp->n_parent) {
			dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
		} else {
			dp->d_fileno = dnp->n_vattr.nva_fileid;
		}
		dp->d_type = DT_DIR;
		dp->d_reclen = reclen;
		dp->d_seekoff = 2;
		padstart = dp->d_name + dp->d_namlen + 1 + xlen;
		dp = NFS_DIRENTRY_NEXT(dp);
		padlen = (char*)dp - padstart;
		if (padlen > 0) {
			bzero(padstart, padlen);
		}
		if (rdirplus) { /* zero out attributes */
			bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
		}

		ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
		ndbhp->ndbh_count = 2;
	}

	/*
	 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
	 * the buffer is full (or we hit EOF). Then put the remainder of the
	 * results in the next buffer(s).
	 */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
		// PUTFH, GETATTR, READDIR
		numops = 3;
		nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
		nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
		nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
		nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
		numops--;
		nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
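		/* READDIR args: cookie, cookie verifier, dircount, maxcount, attribute bitmap */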
		nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
		nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
		nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
		nfsm_chain_add_32(error, &nmreq, nmrsize);
		nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
		nfsm_chain_build_done(error, &nmreq);
		nfsm_assert(error, (numops == 0), EPROTO);
		nfs_node_unlock(dnp);
		nfsmout_if(error);
		error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}

		savedxid = xid;
		nfsm_chain_skip_tag(error, &nmrep);
		nfsm_chain_get_32(error, &nmrep, numops);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
		nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
		nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
		nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
		nfsm_chain_get_32(error, &nmrep, more_entries);

		if (!lockerror) {
			nfs_node_unlock(dnp);
			lockerror = ENOENT;
		}
		nfsmout_if(error);

		if (rdirplus) {
			microuptime(&now);
		}

		/* loop through the entries packing them into the buffer */
		while (more_entries) {
			/* Entry: COOKIE, NAME, FATTR */
			nfsm_chain_get_64(error, &nmrep, cookie);
			nfsm_chain_get_32(error, &nmrep, namlen);
			nfsmout_if(error);
			if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
				/* we've got a big cookie, make sure flag is set */
				lck_mtx_lock(&nmp->nm_lock);
				nmp->nm_state |= NFSSTA_BIGCOOKIES;
				lck_mtx_unlock(&nmp->nm_lock);
				bigcookies = 1;
			}
			/* just truncate names that don't fit in direntry.d_name */
			if (namlen <= 0) {
				error = EBADRPC;
				goto nfsmout;
			}
			if (namlen > (sizeof(dp->d_name) - 1)) {
				skiplen = namlen - sizeof(dp->d_name) + 1;
				namlen = sizeof(dp->d_name) - 1;
			} else {
				skiplen = 0;
			}
			/* guess that fh size will be same as parent */
			fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
			xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
			attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
			reclen = NFS_DIRENTRY_LEN(namlen + xlen);
			space_needed = reclen + attrlen;
			space_free = nfs_dir_buf_freespace(bp, rdirplus);
			if (space_needed > space_free) {
				/*
				 * We still have entries to pack, but we've
				 * run out of room in the current buffer.
				 * So we need to move to the next buffer.
				 * The block# for the next buffer is the
				 * last cookie in the current buffer.
				 */
nextbuffer:
				ndbhp->ndbh_flags |= NDB_FULL;
				nfs_buf_release(bp, 0);
				bp_dropped = 1;
				bp = NULL;
				error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
				nfsmout_if(error);
				/* initialize buffer */
				ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = dnp->n_ncgen;
				space_free = nfs_dir_buf_freespace(bp, rdirplus);
				dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
				/* increment with every buffer read */
				OSAddAtomic64(1, &nfsstats.readdir_bios);
			}
			nmrepsave = nmrep;
			dp->d_fileno = cookie; /* placeholder */
			dp->d_seekoff = cookie;
			dp->d_namlen = namlen;
			dp->d_reclen = reclen;
			dp->d_type = DT_UNKNOWN;
			nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
			nfsmout_if(error);
			dp->d_name[namlen] = '\0';
			if (skiplen) {
				nfsm_chain_adv(error, &nmrep,
				    nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
			}
			nfsmout_if(error);
			nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
			error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
			if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
				/* we do NOT want ACLs returned to us here */
				NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
				if (nvattrp->nva_acl) {
					kauth_acl_free(nvattrp->nva_acl);
					nvattrp->nva_acl = NULL;
				}
			}
			if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
				/* OK, we may not have gotten all of the attributes but we will use what we can. */
				if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
					/* set this up to look like a referral trigger */
					nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
				}
				error = 0;
			}
			/* check for more entries after this one */
			nfsm_chain_get_32(error, &nmrep, more_entries);
			nfsmout_if(error);

			/* Skip any "." and ".." entries returned from server. */
			/* Also skip any bothersome named attribute entries. */
			if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
			    (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
				lastcookie = cookie;
				continue;
			}

			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
				dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
			}
			if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
				dp->d_fileno = nvattrp->nva_fileid;
			}
			if (rdirplus) {
				/* fileid is already in d_fileno, so stash xid in attrs */
				nvattrp->nva_fileid = savedxid;
				if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					space_needed = reclen + attrlen;
					if (space_needed > space_free) {
						/* didn't actually have the room... move on to next buffer */
						nmrep = nmrepsave;
						goto nextbuffer;
					}
					/* pack the file handle into the record */
					dp->d_name[dp->d_namlen + 1] = fh.fh_len;
					bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
				} else {
					/* mark the file handle invalid */
					fh.fh_len = 0;
					fhlen = fh.fh_len + 1;
					xlen = fhlen + sizeof(time_t);
					reclen = NFS_DIRENTRY_LEN(namlen + xlen);
					bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
				}
				*(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
				dp->d_reclen = reclen;
			}
			padstart = dp->d_name + dp->d_namlen + 1 + xlen;
			ndbhp->ndbh_count++;
			lastcookie = cookie;

			/* advance to next direntry in buffer */
			dp = NFS_DIRENTRY_NEXT(dp);
			ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
			/* zero out the pad bytes */
			padlen = (char*)dp - padstart;
			if (padlen > 0) {
				bzero(padstart, padlen);
			}
		}
		/* Finally, get the eof boolean */
		nfsm_chain_get_32(error, &nmrep, eof);
		nfsmout_if(error);
		if (eof) {
			ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
			nfs_node_lock_force(dnp);
			dnp->n_eofcookie = lastcookie;
			nfs_node_unlock(dnp);
		} else {
			more_entries = 1;
		}
		if (bp_dropped) {
			nfs_buf_release(bp, 0);
			bp = NULL;
			break;
		}
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
		nfsmout_if(error);
		nfsm_chain_cleanup(&nmrep);
		nfsm_chain_null(&nmreq);
	}
nfsmout:
	if (bp_dropped && bp) {
		nfs_buf_release(bp, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
}

int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}

int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}

int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;

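	/* COMMIT's count is 32 bits; a count of 0 means commit everything from the offset on. */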
	if (count > UINT32_MAX) {
		count32 = 0;
	} else {
		count32 = count;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}

int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  struct vnode_attr *a_vap;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error) {
		return error;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, 0777);
		} else {
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva.nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva.nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return error;
}

int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
	uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
	nfs_stateid stateid;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags) {
			return EINVAL; /* return EINVAL if other attributes also set */
		} else {
			return ENOTSUP; /* return ENOTSUP for chflags(2) */
		}
	}

	/* don't bother requesting some changes if they don't look like they are changing */
	if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

tryagain:
	/* do nothing if no attributes will be sent */
	nfs_vattr_set_bitmap(nmp, bitmap, vap);
	if (!bitmap[0] && !bitmap[1]) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
	 * need to invalidate any cached ACL. And if we had an ACL cached,
	 * we might as well also fetch the new value.
	 */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
	    NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
		if (NACLVALID(np)) {
			NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
		}
		NACLINVALIDATE(np);
	}

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
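	/*
	 * SETATTR needs a real stateid only when the size is being changed
	 * (truncation is an I/O operation); otherwise send the special
	 * all-zero (anonymous) stateid.
	 */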
1678 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
1679 nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
1680 } else {
1681 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
1682 }
1683 nfsm_chain_add_stateid(error, &nmreq, &stateid);
1684 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
1685 numops--;
1686 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
1687 nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
1688 nfsm_chain_build_done(error, &nmreq);
1689 nfsm_assert(error, (numops == 0), EPROTO);
1690 nfsmout_if(error);
1691 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
1692
1693 if ((lockerror = nfs_node_lock(np))) {
1694 error = lockerror;
1695 }
1696 nfsm_chain_skip_tag(error, &nmrep);
1697 nfsm_chain_get_32(error, &nmrep, numops);
1698 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1699 nfsmout_if(error);
1700 nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
1701 nfsmout_if(error == EBADRPC);
1702 setattr_error = error;
1703 error = 0;
1704 bmlen = NFS_ATTR_BITMAP_LEN;
1705 nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
1706 if (!error) {
1707 if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
1708 microuptime(&np->n_lastio);
1709 }
1710 nfs_vattr_set_supported(setbitmap, vap);
1711 error = setattr_error;
1712 }
1713 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1714 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
1715 if (error) {
1716 NATTRINVALIDATE(np);
1717 }
1718 /*
1719 * We just changed the attributes and we want to make sure that we
1720 * see the latest attributes. Get the next XID. If it's not the
1721 * next XID after the SETATTR XID, then it's possible that another
1722 * RPC was in flight at the same time and it might put stale attributes
1723 * in the cache. In that case, we invalidate the attributes and set
1724 * the attribute cache XID to guarantee that newer attributes will
1725 * get loaded next.
1726 */
1727 nextxid = 0;
1728 nfs_get_xid(&nextxid);
1729 if (nextxid != (xid + 1)) {
1730 np->n_xid = nextxid;
1731 NATTRINVALIDATE(np);
1732 }
1733 nfsmout:
1734 if (!lockerror) {
1735 nfs_node_unlock(np);
1736 }
1737 nfsm_chain_cleanup(&nmreq);
1738 nfsm_chain_cleanup(&nmrep);
1739 if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
1740 /*
1741 * Some servers may not like ACL/mode combos that get sent.
1742 * If it looks like that's what the server choked on, try setting
1743 * just the ACL and not the mode (unless it looks like everything
1744 * but mode was already successfully set).
1745 */
1746 if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
1747 ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
1748 VATTR_CLEAR_ACTIVE(vap, va_mode);
1749 error = 0;
1750 goto tryagain;
1751 }
1752 }
1753 return error;
1754 }
1755
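/*
 * Illustrative sketch (compiled out): the XID-ordering guard used at the
 * end of nfs4_setattr_rpc() above, shown in isolation. The helper name is
 * hypothetical; nfs_get_xid() and NATTRINVALIDATE() are the primitives the
 * function already uses.
 */
#if 0
static void
example_setattr_xid_guard(nfsnode_t np, u_int64_t setattr_xid)
{
	u_int64_t nextxid = 0;

	nfs_get_xid(&nextxid);
	if (nextxid != (setattr_xid + 1)) {
		/* another RPC raced the SETATTR; force fresh attributes */
		np->n_xid = nextxid;
		NATTRINVALIDATE(np);
	}
}
#endif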
1756 /*
1757 * Wait for any pending recovery to complete.
1758 */
1759 int
1760 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1761 {
1762 struct timespec ts = { 1, 0 };
1763 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1764
1765 lck_mtx_lock(&nmp->nm_lock);
1766 while (nmp->nm_state & NFSSTA_RECOVER) {
1767 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1768 break;
1769 }
1770 nfs_mount_sock_thread_wake(nmp);
1771 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1772 slpflag = 0;
1773 }
1774 lck_mtx_unlock(&nmp->nm_lock);
1775
1776 return error;
1777 }
1778
1779 /*
1780 * We're about to use/manipulate NFS mount's open/lock state.
1781 * Wait for any pending state recovery to complete, then
1782 * mark the state as being in use (which will hold off
1783 * the recovery thread until we're done).
1784 */
1785 int
1786 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1787 {
1788 struct timespec ts = { 1, 0 };
1789 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1790
1791 if (nfs_mount_gone(nmp)) {
1792 return ENXIO;
1793 }
1794 lck_mtx_lock(&nmp->nm_lock);
1795 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
1796 lck_mtx_unlock(&nmp->nm_lock);
1797 return ENXIO;
1798 }
1799 while (nmp->nm_state & NFSSTA_RECOVER) {
1800 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1801 break;
1802 }
1803 nfs_mount_sock_thread_wake(nmp);
1804 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1805 slpflag = 0;
1806 }
1807 if (!error) {
1808 nmp->nm_stateinuse++;
1809 }
1810 lck_mtx_unlock(&nmp->nm_lock);
1811
1812 return error;
1813 }
1814
1815 /*
1816 * We're done using/manipulating the NFS mount's open/lock
1817 * state. If the given error indicates that recovery should
1818 * be performed, we'll initiate recovery.
1819 */
1820 int
1821 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1822 {
1823 int restart = nfs_mount_state_error_should_restart(error);
1824
1825 if (nfs_mount_gone(nmp)) {
1826 return restart;
1827 }
1828 lck_mtx_lock(&nmp->nm_lock);
1829 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1830 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1831 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1832 nfs_need_recover(nmp, error);
1833 }
1834 if (nmp->nm_stateinuse > 0) {
1835 nmp->nm_stateinuse--;
1836 } else {
1837 panic("NFS mount state in use count underrun");
1838 }
1839 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
1840 wakeup(&nmp->nm_stateinuse);
1841 }
1842 lck_mtx_unlock(&nmp->nm_lock);
1843 if (error == NFSERR_GRACE) {
1844 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1845 }
1846
1847 return restart;
1848 }
1849
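/*
 * Illustrative sketch (compiled out): the typical caller pattern for the
 * in-use gates above. The function name and RPC body are hypothetical;
 * the restart loop mirrors what callers like nfs_vnop_mmap() below do.
 */
#if 0
static int
example_state_operation(struct nfsmount *nmp, vfs_context_t ctx)
{
	int error;

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		return error;
	}

	/* ... perform an open/lock/close RPC here, setting error ... */

	if (nfs_mount_state_in_use_end(nmp, error)) {
		goto restart;	/* recovery was triggered; redo the operation */
	}
	return error;
}
#endif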
1850 /*
1851 * Does the error mean we should restart/redo a state-related operation?
1852 */
1853 int
1854 nfs_mount_state_error_should_restart(int error)
1855 {
1856 switch (error) {
1857 case NFSERR_STALE_STATEID:
1858 case NFSERR_STALE_CLIENTID:
1859 case NFSERR_ADMIN_REVOKED:
1860 case NFSERR_EXPIRED:
1861 case NFSERR_OLD_STATEID:
1862 case NFSERR_BAD_STATEID:
1863 case NFSERR_GRACE:
1864 return 1;
1865 }
1866 return 0;
1867 }
1868
1869 /*
1870 * In some cases we may want to limit how many times we restart a
1871 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1872 * Base the limit on the lease (as long as it's not too short).
1873 */
1874 uint
1875 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1876 {
1877 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1878 }
1879
1880 /*
1881 * Does the error mean we probably lost a delegation?
1882 */
1883 int
1884 nfs_mount_state_error_delegation_lost(int error)
1885 {
1886 switch (error) {
1887 case NFSERR_STALE_STATEID:
1888 case NFSERR_ADMIN_REVOKED:
1889 case NFSERR_EXPIRED:
1890 case NFSERR_OLD_STATEID:
1891 case NFSERR_BAD_STATEID:
1892 case NFSERR_GRACE: /* RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during the grace period */
1893 return 1;
1894 }
1895 return 0;
1896 }
1897
1898
1899 /*
1900 * Mark an NFS node's open state as busy.
1901 */
1902 int
1903 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1904 {
1905 struct nfsmount *nmp;
1906 struct timespec ts = {2, 0};
1907 int error = 0, slpflag;
1908
1909 nmp = NFSTONMP(np);
1910 if (nfs_mount_gone(nmp)) {
1911 return ENXIO;
1912 }
1913 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1914
1915 lck_mtx_lock(&np->n_openlock);
1916 while (np->n_openflags & N_OPENBUSY) {
1917 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1918 break;
1919 }
1920 np->n_openflags |= N_OPENWANT;
1921 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1922 slpflag = 0;
1923 }
1924 if (!error) {
1925 np->n_openflags |= N_OPENBUSY;
1926 }
1927 lck_mtx_unlock(&np->n_openlock);
1928
1929 return error;
1930 }
1931
1932 /*
1933 * Clear an NFS node's open state busy flag and wake up
1934 * anyone wanting it.
1935 */
1936 void
1937 nfs_open_state_clear_busy(nfsnode_t np)
1938 {
1939 int wanted;
1940
1941 lck_mtx_lock(&np->n_openlock);
1942 if (!(np->n_openflags & N_OPENBUSY)) {
1943 panic("nfs_open_state_clear_busy");
1944 }
1945 wanted = (np->n_openflags & N_OPENWANT);
1946 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1947 lck_mtx_unlock(&np->n_openlock);
1948 if (wanted) {
1949 wakeup(&np->n_openflags);
1950 }
1951 }
1952
1953 /*
1954 * Search a mount's open owner list for the owner for this credential.
1955 * If not found and "alloc" is set, then allocate a new one.
1956 */
1957 struct nfs_open_owner *
1958 nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
1959 {
1960 uid_t uid = kauth_cred_getuid(cred);
1961 struct nfs_open_owner *noop, *newnoop = NULL;
1962
1963 tryagain:
1964 lck_mtx_lock(&nmp->nm_lock);
1965 TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
1966 if (kauth_cred_getuid(noop->noo_cred) == uid) {
1967 break;
1968 }
1969 }
1970
1971 if (!noop && !newnoop && alloc) {
1972 lck_mtx_unlock(&nmp->nm_lock);
1973 MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
1974 if (!newnoop) {
1975 return NULL;
1976 }
1977 bzero(newnoop, sizeof(*newnoop));
1978 lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
1979 newnoop->noo_mount = nmp;
1980 kauth_cred_ref(cred);
1981 newnoop->noo_cred = cred;
1982 newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
1983 TAILQ_INIT(&newnoop->noo_opens);
1984 goto tryagain;
1985 }
1986 if (!noop && newnoop) {
1987 newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
1988 os_ref_init(&newnoop->noo_refcnt, NULL);
1989 TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
1990 noop = newnoop;
1991 }
1992 lck_mtx_unlock(&nmp->nm_lock);
1993
1994 if (newnoop && (noop != newnoop)) {
1995 nfs_open_owner_destroy(newnoop);
1996 }
1997
1998 if (noop) {
1999 nfs_open_owner_ref(noop);
2000 }
2001
2002 return noop;
2003 }
2004
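/*
 * Note the allocation pattern above: the mount lock is dropped before
 * MALLOC() can block, the list is re-searched after the allocation
 * ("tryagain"), and if another thread raced us and inserted an owner
 * for the same uid first, the freshly built one is discarded via
 * nfs_open_owner_destroy(). The same pattern appears in
 * nfs_open_file_find_internal() and nfs_lock_owner_find() below.
 */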
2005 /*
2006 * destroy an open owner that's no longer needed
2007 */
2008 void
2009 nfs_open_owner_destroy(struct nfs_open_owner *noop)
2010 {
2011 if (noop->noo_cred) {
2012 kauth_cred_unref(&noop->noo_cred);
2013 }
2014 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2015 FREE(noop, M_TEMP);
2016 }
2017
2018 /*
2019 * acquire a reference count on an open owner
2020 */
2021 void
2022 nfs_open_owner_ref(struct nfs_open_owner *noop)
2023 {
2024 lck_mtx_lock(&noop->noo_lock);
2025 os_ref_retain_locked(&noop->noo_refcnt);
2026 lck_mtx_unlock(&noop->noo_lock);
2027 }
2028
2029 /*
2030 * drop a reference count on an open owner and destroy it if
2031 * it is no longer referenced and no longer on the mount's list.
2032 */
2033 void
2034 nfs_open_owner_rele(struct nfs_open_owner *noop)
2035 {
2036 os_ref_count_t newcount;
2037
2038 lck_mtx_lock(&noop->noo_lock);
2039 if (os_ref_get_count(&noop->noo_refcnt) < 1) {
2040 panic("nfs_open_owner_rele: no refcnt");
2041 }
2042 newcount = os_ref_release_locked(&noop->noo_refcnt);
2043 if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2044 panic("nfs_open_owner_rele: busy");
2045 }
2046 /* XXX we may potentially want to clean up idle/unused open owner structures */
2047 if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
2048 lck_mtx_unlock(&noop->noo_lock);
2049 return;
2050 }
2051 /* owner is no longer referenced or linked to mount, so destroy it */
2052 lck_mtx_unlock(&noop->noo_lock);
2053 nfs_open_owner_destroy(noop);
2054 }
2055
2056 /*
2057 * Mark an open owner as busy because we are about to
2058 * start an operation that uses and updates open owner state.
2059 */
2060 int
2061 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2062 {
2063 struct nfsmount *nmp;
2064 struct timespec ts = {2, 0};
2065 int error = 0, slpflag;
2066
2067 nmp = noop->noo_mount;
2068 if (nfs_mount_gone(nmp)) {
2069 return ENXIO;
2070 }
2071 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2072
2073 lck_mtx_lock(&noop->noo_lock);
2074 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
2075 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2076 break;
2077 }
2078 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2079 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
2080 slpflag = 0;
2081 }
2082 if (!error) {
2083 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
2084 }
2085 lck_mtx_unlock(&noop->noo_lock);
2086
2087 return error;
2088 }
2089
2090 /*
2091 * Clear the busy flag on an open owner and wake up anyone waiting
2092 * to mark it busy.
2093 */
2094 void
2095 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2096 {
2097 int wanted;
2098
2099 lck_mtx_lock(&noop->noo_lock);
2100 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2101 panic("nfs_open_owner_clear_busy");
2102 }
2103 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2104 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2105 lck_mtx_unlock(&noop->noo_lock);
2106 if (wanted) {
2107 wakeup(noop);
2108 }
2109 }
2110
2111 /*
2112 * Given an open/lock owner and an error code, increment the
2113 * sequence ID if appropriate.
2114 */
2115 void
2116 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2117 {
2118 switch (error) {
2119 case NFSERR_STALE_CLIENTID:
2120 case NFSERR_STALE_STATEID:
2121 case NFSERR_OLD_STATEID:
2122 case NFSERR_BAD_STATEID:
2123 case NFSERR_BAD_SEQID:
2124 case NFSERR_BADXDR:
2125 case NFSERR_RESOURCE:
2126 case NFSERR_NOFILEHANDLE:
2127 /* do not increment the open seqid on these errors */
2128 return;
2129 }
2130 if (noop) {
2131 noop->noo_seqid++;
2132 }
2133 if (nlop) {
2134 nlop->nlo_seqid++;
2135 }
2136 }
2137
2138 /*
2139 * Search a node's open file list for any conflicts with this request.
2140 * Also find this open owner's open file structure.
2141 * If not found and "alloc" is set, then allocate one.
2142 */
2143 int
2144 nfs_open_file_find(
2145 nfsnode_t np,
2146 struct nfs_open_owner *noop,
2147 struct nfs_open_file **nofpp,
2148 uint32_t accessMode,
2149 uint32_t denyMode,
2150 int alloc)
2151 {
2152 *nofpp = NULL;
2153 return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
2154 }
2155
2156 /*
2157 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
2158 * if an existing one is not found. This is used in "create" scenarios to
2159 * officially add the provisional nofp to the node once the node is created.
2160 */
2161 int
2162 nfs_open_file_find_internal(
2163 nfsnode_t np,
2164 struct nfs_open_owner *noop,
2165 struct nfs_open_file **nofpp,
2166 uint32_t accessMode,
2167 uint32_t denyMode,
2168 int alloc)
2169 {
2170 struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;
2171
2172 if (!np) {
2173 goto alloc;
2174 }
2175 tryagain:
2176 lck_mtx_lock(&np->n_openlock);
2177 TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
2178 if (nofp2->nof_owner == noop) {
2179 nofp = nofp2;
2180 if (!accessMode) {
2181 break;
2182 }
2183 }
2184 if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
2185 /* This request conflicts with an existing open on this client. */
2186 lck_mtx_unlock(&np->n_openlock);
2187 return EACCES;
2188 }
2189 }
2190
2191 /*
2192 * If this open owner doesn't have an open
2193 * file structure yet, we create one for it.
2194 */
2195 if (!nofp && !*nofpp && !newnofp && alloc) {
2196 lck_mtx_unlock(&np->n_openlock);
2197 alloc:
2198 MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
2199 if (!newnofp) {
2200 return ENOMEM;
2201 }
2202 bzero(newnofp, sizeof(*newnofp));
2203 lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
2204 newnofp->nof_owner = noop;
2205 nfs_open_owner_ref(noop);
2206 newnofp->nof_np = np;
2207 lck_mtx_lock(&noop->noo_lock);
2208 TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
2209 lck_mtx_unlock(&noop->noo_lock);
2210 if (np) {
2211 goto tryagain;
2212 }
2213 }
2214 if (!nofp) {
2215 if (*nofpp) {
2216 (*nofpp)->nof_np = np;
2217 nofp = *nofpp;
2218 } else {
2219 nofp = newnofp;
2220 }
2221 if (nofp && np) {
2222 TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
2223 }
2224 }
2225 if (np) {
2226 lck_mtx_unlock(&np->n_openlock);
2227 }
2228
2229 if (alloc && newnofp && (nofp != newnofp)) {
2230 nfs_open_file_destroy(newnofp);
2231 }
2232
2233 *nofpp = nofp;
2234 return nofp ? 0 : ESRCH;
2235 }
2236
2237 /*
2238 * Destroy an open file structure.
2239 */
2240 void
2241 nfs_open_file_destroy(struct nfs_open_file *nofp)
2242 {
2243 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2244 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2245 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2246 nfs_open_owner_rele(nofp->nof_owner);
2247 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2248 FREE(nofp, M_TEMP);
2249 }
2250
2251 /*
2252 * Mark an open file as busy because we are about to
2253 * start an operation that uses and updates open file state.
2254 */
2255 int
2256 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2257 {
2258 struct nfsmount *nmp;
2259 struct timespec ts = {2, 0};
2260 int error = 0, slpflag;
2261
2262 nmp = nofp->nof_owner->noo_mount;
2263 if (nfs_mount_gone(nmp)) {
2264 return ENXIO;
2265 }
2266 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2267
2268 lck_mtx_lock(&nofp->nof_lock);
2269 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2270 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2271 break;
2272 }
2273 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2274 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2275 slpflag = 0;
2276 }
2277 if (!error) {
2278 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2279 }
2280 lck_mtx_unlock(&nofp->nof_lock);
2281
2282 return error;
2283 }
2284
2285 /*
2286 * Clear the busy flag on an open file and wake up anyone waiting
2287 * to mark it busy.
2288 */
2289 void
2290 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2291 {
2292 int wanted;
2293
2294 lck_mtx_lock(&nofp->nof_lock);
2295 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2296 panic("nfs_open_file_clear_busy");
2297 }
2298 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2299 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2300 lck_mtx_unlock(&nofp->nof_lock);
2301 if (wanted) {
2302 wakeup(nofp);
2303 }
2304 }
2305
2306 /*
2307 * Add the open state for the given access/deny modes to this open file.
2308 */
2309 void
2310 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2311 {
2312 lck_mtx_lock(&nofp->nof_lock);
2313 nofp->nof_access |= accessMode;
2314 nofp->nof_deny |= denyMode;
2315
2316 if (delegated) {
2317 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2318 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2319 nofp->nof_d_r++;
2320 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2321 nofp->nof_d_w++;
2322 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2323 nofp->nof_d_rw++;
2324 }
2325 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2326 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2327 nofp->nof_d_r_dw++;
2328 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2329 nofp->nof_d_w_dw++;
2330 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2331 nofp->nof_d_rw_dw++;
2332 }
2333 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2334 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2335 nofp->nof_d_r_drw++;
2336 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2337 nofp->nof_d_w_drw++;
2338 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2339 nofp->nof_d_rw_drw++;
2340 }
2341 }
2342 } else {
2343 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2344 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2345 nofp->nof_r++;
2346 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2347 nofp->nof_w++;
2348 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2349 nofp->nof_rw++;
2350 }
2351 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2352 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2353 nofp->nof_r_dw++;
2354 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2355 nofp->nof_w_dw++;
2356 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2357 nofp->nof_rw_dw++;
2358 }
2359 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2360 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2361 nofp->nof_r_drw++;
2362 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2363 nofp->nof_w_drw++;
2364 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2365 nofp->nof_rw_drw++;
2366 }
2367 }
2368 }
2369
2370 nofp->nof_opencnt++;
2371 lck_mtx_unlock(&nofp->nof_lock);
2372 }
2373
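/*
 * For example (illustrative values): a delegated open with
 * accessMode == NFS_OPEN_SHARE_ACCESS_BOTH and
 * denyMode == NFS_OPEN_SHARE_DENY_NONE bumps nof_d_rw, while the same
 * modes without a delegation bump nof_rw; both bump nof_opencnt. The
 * counters thus form a 3x3 matrix (access READ/WRITE/BOTH by deny
 * NONE/WRITE/BOTH), doubled for delegated vs. non-delegated opens.
 */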
2374 /*
2375 * Find which particular open combo will be closed and report what
2376 * the new modes will be and whether the open was delegated.
2377 */
2378 void
2379 nfs_open_file_remove_open_find(
2380 struct nfs_open_file *nofp,
2381 uint32_t accessMode,
2382 uint32_t denyMode,
2383 uint32_t *newAccessMode,
2384 uint32_t *newDenyMode,
2385 int *delegated)
2386 {
2387 /*
2388 * Calculate new modes: a mode bit gets removed when the sum of
2389 * all the corresponding open counts is exactly one.
2390 */
2391 *newAccessMode = nofp->nof_access;
2392 *newDenyMode = nofp->nof_deny;
2393
2394 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2395 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2396 ((nofp->nof_r + nofp->nof_d_r +
2397 nofp->nof_rw + nofp->nof_d_rw +
2398 nofp->nof_r_dw + nofp->nof_d_r_dw +
2399 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2400 nofp->nof_r_drw + nofp->nof_d_r_drw +
2401 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2402 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2403 }
2404 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2405 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2406 ((nofp->nof_w + nofp->nof_d_w +
2407 nofp->nof_rw + nofp->nof_d_rw +
2408 nofp->nof_w_dw + nofp->nof_d_w_dw +
2409 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2410 nofp->nof_w_drw + nofp->nof_d_w_drw +
2411 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2412 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2413 }
2414 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2415 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2416 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2417 nofp->nof_w_drw + nofp->nof_d_w_drw +
2418 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2419 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2420 }
2421 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2422 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2423 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2424 nofp->nof_w_drw + nofp->nof_d_w_drw +
2425 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2426 nofp->nof_r_dw + nofp->nof_d_r_dw +
2427 nofp->nof_w_dw + nofp->nof_d_w_dw +
2428 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2429 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2430 }
2431
2432 /* Find the corresponding open access/deny mode counter. */
2433 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2434 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2435 *delegated = (nofp->nof_d_r != 0);
2436 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2437 *delegated = (nofp->nof_d_w != 0);
2438 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2439 *delegated = (nofp->nof_d_rw != 0);
2440 } else {
2441 *delegated = 0;
2442 }
2443 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2444 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2445 *delegated = (nofp->nof_d_r_dw != 0);
2446 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2447 *delegated = (nofp->nof_d_w_dw != 0);
2448 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2449 *delegated = (nofp->nof_d_rw_dw != 0);
2450 } else {
2451 *delegated = 0;
2452 }
2453 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2454 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2455 *delegated = (nofp->nof_d_r_drw != 0);
2456 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2457 *delegated = (nofp->nof_d_w_drw != 0);
2458 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2459 *delegated = (nofp->nof_d_rw_drw != 0);
2460 } else {
2461 *delegated = 0;
2462 }
2463 }
2464 }
2465
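/*
 * Worked example (illustrative): if nof_r == 1 and every other counter
 * that contributes read access (nof_rw, nof_r_dw, nof_rw_dw, nof_r_drw,
 * nof_rw_drw, and the delegated variants) is 0, then removing an
 * (ACCESS_READ, DENY_NONE) open finds the read sum exactly 1, so
 * NFS_OPEN_SHARE_ACCESS_READ is cleared from *newAccessMode; the deny
 * sums are untouched since denyMode is DENY_NONE.
 */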
2466 /*
2467 * Remove the open state for the given access/deny modes to this open file.
2468 */
2469 void
2470 nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
2471 {
2472 uint32_t newAccessMode, newDenyMode;
2473 int delegated = 0;
2474
2475 lck_mtx_lock(&nofp->nof_lock);
2476 nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);
2477
2478 /* Decrement the corresponding open access/deny mode counter. */
2479 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2480 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2481 if (delegated) {
2482 if (nofp->nof_d_r == 0) {
2483 NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2484 } else {
2485 nofp->nof_d_r--;
2486 }
2487 } else {
2488 if (nofp->nof_r == 0) {
2489 NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2490 } else {
2491 nofp->nof_r--;
2492 }
2493 }
2494 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2495 if (delegated) {
2496 if (nofp->nof_d_w == 0) {
2497 NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2498 } else {
2499 nofp->nof_d_w--;
2500 }
2501 } else {
2502 if (nofp->nof_w == 0) {
2503 NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2504 } else {
2505 nofp->nof_w--;
2506 }
2507 }
2508 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2509 if (delegated) {
2510 if (nofp->nof_d_rw == 0) {
2511 NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2512 } else {
2513 nofp->nof_d_rw--;
2514 }
2515 } else {
2516 if (nofp->nof_rw == 0) {
2517 NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2518 } else {
2519 nofp->nof_rw--;
2520 }
2521 }
2522 }
2523 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2524 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2525 if (delegated) {
2526 if (nofp->nof_d_r_dw == 0) {
2527 NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2528 } else {
2529 nofp->nof_d_r_dw--;
2530 }
2531 } else {
2532 if (nofp->nof_r_dw == 0) {
2533 NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2534 } else {
2535 nofp->nof_r_dw--;
2536 }
2537 }
2538 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2539 if (delegated) {
2540 if (nofp->nof_d_w_dw == 0) {
2541 NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2542 } else {
2543 nofp->nof_d_w_dw--;
2544 }
2545 } else {
2546 if (nofp->nof_w_dw == 0) {
2547 NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2548 } else {
2549 nofp->nof_w_dw--;
2550 }
2551 }
2552 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2553 if (delegated) {
2554 if (nofp->nof_d_rw_dw == 0) {
2555 NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2556 } else {
2557 nofp->nof_d_rw_dw--;
2558 }
2559 } else {
2560 if (nofp->nof_rw_dw == 0) {
2561 NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2562 } else {
2563 nofp->nof_rw_dw--;
2564 }
2565 }
2566 }
2567 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2568 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2569 if (delegated) {
2570 if (nofp->nof_d_r_drw == 0) {
2571 NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2572 } else {
2573 nofp->nof_d_r_drw--;
2574 }
2575 } else {
2576 if (nofp->nof_r_drw == 0) {
2577 NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2578 } else {
2579 nofp->nof_r_drw--;
2580 }
2581 }
2582 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2583 if (delegated) {
2584 if (nofp->nof_d_w_drw == 0) {
2585 NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2586 } else {
2587 nofp->nof_d_w_drw--;
2588 }
2589 } else {
2590 if (nofp->nof_w_drw == 0) {
2591 NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2592 } else {
2593 nofp->nof_w_drw--;
2594 }
2595 }
2596 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2597 if (delegated) {
2598 if (nofp->nof_d_rw_drw == 0) {
2599 NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2600 } else {
2601 nofp->nof_d_rw_drw--;
2602 }
2603 } else {
2604 if (nofp->nof_rw_drw == 0) {
2605 NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
2606 } else {
2607 nofp->nof_rw_drw--;
2608 }
2609 }
2610 }
2611 }
2612
2613 /* update the modes */
2614 nofp->nof_access = newAccessMode;
2615 nofp->nof_deny = newDenyMode;
2616 nofp->nof_opencnt--;
2617 lck_mtx_unlock(&nofp->nof_lock);
2618 }
2619
2620
2621 /*
2622 * Get the current (delegation, lock, open, default) stateid for this node.
2623 * If node has a delegation, use that stateid.
2624 * If pid has a lock, use the lockowner's stateid.
2625 * Or use the open file's stateid.
2626 * If no open file, use a default stateid of all ones.
2627 */
2628 void
2629 nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
2630 {
2631 struct nfsmount *nmp = NFSTONMP(np);
2632 proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
2633 struct nfs_open_owner *noop = NULL;
2634 struct nfs_open_file *nofp = NULL;
2635 struct nfs_lock_owner *nlop = NULL;
2636 nfs_stateid *s = NULL;
2637
2638 if (np->n_openflags & N_DELEG_MASK) {
2639 s = &np->n_dstateid;
2640 } else {
2641 if (p) {
2642 nlop = nfs_lock_owner_find(np, p, 0);
2643 }
2644 if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
2645 /* we hold locks, use lock stateid */
2646 s = &nlop->nlo_stateid;
2647 } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
2648 (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
2649 !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
2650 nofp->nof_access) {
2651 /* we (should) have the file open, use open stateid */
2652 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
2653 nfs4_reopen(nofp, thd);
2654 }
2655 if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2656 s = &nofp->nof_stateid;
2657 }
2658 }
2659 }
2660
2661 if (s) {
2662 sid->seqid = s->seqid;
2663 sid->other[0] = s->other[0];
2664 sid->other[1] = s->other[1];
2665 sid->other[2] = s->other[2];
2666 } else {
2667 /* named attributes may not have a stateid for reads, so don't complain for them */
2668 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
2669 NP(np, "nfs_get_stateid: no stateid");
2670 }
2671 sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
2672 }
2673 if (nlop) {
2674 nfs_lock_owner_rele(nlop);
2675 }
2676 if (noop) {
2677 nfs_open_owner_rele(noop);
2678 }
2679 }
2680
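/*
 * Priority order, highest first: the delegation stateid (n_dstateid),
 * this pid's lock stateid (nlo_stateid) if it holds locks, the open
 * file's stateid (nof_stateid), and finally the all-ones special
 * stateid defined by NFSv4 for access without established open state.
 */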
2681
2682 /*
2683 * When we have a delegation, we may be able to perform the OPEN locally.
2684 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
2685 */
2686 int
2687 nfs4_open_delegated(
2688 nfsnode_t np,
2689 struct nfs_open_file *nofp,
2690 uint32_t accessMode,
2691 uint32_t denyMode,
2692 vfs_context_t ctx)
2693 {
2694 int error = 0, ismember, readtoo = 0, authorized = 0;
2695 uint32_t action;
2696 struct kauth_acl_eval eval;
2697 kauth_cred_t cred = vfs_context_ucred(ctx);
2698
2699 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2700 /*
2701 * Try to open it for read access too,
2702 * so the buffer cache can read data.
2703 */
2704 readtoo = 1;
2705 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2706 }
2707
2708 tryagain:
2709 action = 0;
2710 if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
2711 action |= KAUTH_VNODE_READ_DATA;
2712 }
2713 if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
2714 action |= KAUTH_VNODE_WRITE_DATA;
2715 }
2716
2717 /* evaluate ACE (if we have one) */
2718 if (np->n_dace.ace_flags) {
2719 eval.ae_requested = action;
2720 eval.ae_acl = &np->n_dace;
2721 eval.ae_count = 1;
2722 eval.ae_options = 0;
2723 if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
2724 eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
2725 }
2726 error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
2727 if (!error && ismember) {
2728 eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
2729 }
2730
2731 eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
2732 eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
2733 eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
2734 eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
2735
2736 error = kauth_acl_evaluate(cred, &eval);
2737
2738 if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
2739 authorized = 1;
2740 }
2741 }
2742
2743 if (!authorized) {
2744 /* need to ask the server via ACCESS */
2745 struct vnop_access_args naa;
2746 naa.a_desc = &vnop_access_desc;
2747 naa.a_vp = NFSTOV(np);
2748 naa.a_action = action;
2749 naa.a_context = ctx;
2750 if (!(error = nfs_vnop_access(&naa))) {
2751 authorized = 1;
2752 }
2753 }
2754
2755 if (!authorized) {
2756 if (readtoo) {
2757 /* try again without the extra read access */
2758 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2759 readtoo = 0;
2760 goto tryagain;
2761 }
2762 return error ? error : EACCES;
2763 }
2764
2765 nfs_open_file_add_open(nofp, accessMode, denyMode, 1);
2766
2767 return 0;
2768 }
2769
2770
2771 /*
2772 * Open a file with the given access/deny modes.
2773 *
2774 * If we have a delegation, we may be able to handle the open locally.
2775 * Otherwise, we will always send the open RPC even if this open's mode is
2776 * a subset of all the existing opens. This makes sure that we will always
2777 * be able to do a downgrade to any of the open modes.
2778 *
2779 * Note: local conflicts should have already been checked in nfs_open_file_find().
2780 */
2781 int
2782 nfs4_open(
2783 nfsnode_t np,
2784 struct nfs_open_file *nofp,
2785 uint32_t accessMode,
2786 uint32_t denyMode,
2787 vfs_context_t ctx)
2788 {
2789 vnode_t vp = NFSTOV(np);
2790 vnode_t dvp = NULL;
2791 struct componentname cn;
2792 const char *vname = NULL;
2793 size_t namelen;
2794 char smallname[128];
2795 char *filename = NULL;
2796 int error = 0, readtoo = 0;
2797
2798 /*
2799 * We can handle the OPEN ourselves if we have a delegation,
2800 * unless it's a read delegation and the open is asking for
2801 * either write access or deny read. We also don't bother to
2802 * use the delegation if it's being returned.
2803 */
2804 if (np->n_openflags & N_DELEG_MASK) {
2805 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
2806 return error;
2807 }
2808 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
2809 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
2810 (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
2811 error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
2812 nfs_open_state_clear_busy(np);
2813 return error;
2814 }
2815 nfs_open_state_clear_busy(np);
2816 }
2817
2818 /*
2819 * [sigh] We can't trust VFS to get the parent right for named
2820 * attribute nodes. (It likes to reparent the nodes after we've
2821 * created them.) Luckily we can probably get the right parent
2822 * from the n_parent we have stashed away.
2823 */
2824 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
2825 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
2826 dvp = NULL;
2827 }
2828 if (!dvp) {
2829 dvp = vnode_getparent(vp);
2830 }
2831 vname = vnode_getname(vp);
2832 if (!dvp || !vname) {
2833 if (!error) {
2834 error = EIO;
2835 }
2836 goto out;
2837 }
2838 filename = &smallname[0];
2839 namelen = snprintf(filename, sizeof(smallname), "%s", vname);
2840 if (namelen >= sizeof(smallname)) {
2841 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
2842 if (!filename) {
2843 error = ENOMEM;
2844 goto out;
2845 }
2846 snprintf(filename, namelen + 1, "%s", vname);
2847 }
2848 bzero(&cn, sizeof(cn));
2849 cn.cn_nameptr = filename;
2850 cn.cn_namelen = namelen;
2851
2852 if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
2853 /*
2854 * Try to open it for read access too,
2855 * so the buffer cache can read data.
2856 */
2857 readtoo = 1;
2858 accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
2859 }
2860 tryagain:
2861 error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
2862 if (error) {
2863 if (!nfs_mount_state_error_should_restart(error) &&
2864 (error != EINTR) && (error != ERESTART) && readtoo) {
2865 /* try again without the extra read access */
2866 accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2867 readtoo = 0;
2868 goto tryagain;
2869 }
2870 goto out;
2871 }
2872 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
2873 out:
2874 if (filename && (filename != &smallname[0])) {
2875 FREE(filename, M_TEMP);
2876 }
2877 if (vname) {
2878 vnode_putname(vname);
2879 }
2880 if (dvp != NULLVP) {
2881 vnode_put(dvp);
2882 }
2883 return error;
2884 }
2885
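/*
 * Illustrative sketch (compiled out): the delegation check at the top of
 * nfs4_open(), restated as a predicate. The helper name is hypothetical.
 */
#if 0
static int
example_delegation_can_satisfy_open(uint32_t openflags, uint32_t accessMode, uint32_t denyMode)
{
	if (!(openflags & N_DELEG_MASK) || (openflags & N_DELEG_RETURN)) {
		return 0;	/* no delegation, or it's being returned */
	}
	if ((openflags & N_DELEG_MASK) == N_DELEG_WRITE) {
		return 1;	/* a write delegation covers any open */
	}
	/* a read delegation can't grant write access or deny-read */
	return !(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
	       !(denyMode & NFS_OPEN_SHARE_DENY_READ);
}
#endif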
2886 int
2887 nfs_vnop_mmap(
2888 struct vnop_mmap_args /* {
2889 * struct vnodeop_desc *a_desc;
2890 * vnode_t a_vp;
2891 * int a_fflags;
2892 * vfs_context_t a_context;
2893 * } */*ap)
2894 {
2895 vfs_context_t ctx = ap->a_context;
2896 vnode_t vp = ap->a_vp;
2897 nfsnode_t np = VTONFS(vp);
2898 int error = 0, accessMode, denyMode, delegated;
2899 struct nfsmount *nmp;
2900 struct nfs_open_owner *noop = NULL;
2901 struct nfs_open_file *nofp = NULL;
2902
2903 nmp = VTONMP(vp);
2904 if (nfs_mount_gone(nmp)) {
2905 return ENXIO;
2906 }
2907
2908 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2909 return EINVAL;
2910 }
2911 if (np->n_flag & NREVOKE) {
2912 return EIO;
2913 }
2914
2915 /*
2916 * fflags contains some combination of: PROT_READ, PROT_WRITE
2917 * Since it's not possible to mmap() without having the file open for reading,
2918 * read access is always implied (even if PROT_READ is not set).
2919 */
2920 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2921 if (ap->a_fflags & PROT_WRITE) {
2922 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2923 }
2924 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2925
2926 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2927 if (!noop) {
2928 return ENOMEM;
2929 }
2930
2931 restart:
2932 error = nfs_mount_state_in_use_start(nmp, NULL);
2933 if (error) {
2934 nfs_open_owner_rele(noop);
2935 return error;
2936 }
2937 if (np->n_flag & NREVOKE) {
2938 error = EIO;
2939 nfs_mount_state_in_use_end(nmp, 0);
2940 nfs_open_owner_rele(noop);
2941 return error;
2942 }
2943
2944 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2945 if (error || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
2946 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2947 error = EPERM;
2948 }
2949 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2950 nfs_mount_state_in_use_end(nmp, 0);
2951 error = nfs4_reopen(nofp, NULL);
2952 nofp = NULL;
2953 if (!error) {
2954 goto restart;
2955 }
2956 }
2957 if (!error) {
2958 error = nfs_open_file_set_busy(nofp, NULL);
2959 }
2960 if (error) {
2961 nofp = NULL;
2962 goto out;
2963 }
2964
2965 /*
2966 * The open reference for mmap must mirror an existing open because
2967 * we may need to reclaim it after the file is closed.
2968 * So grab another open count matching the accessMode passed in.
2969 * If we already had an mmap open, prefer read/write without deny mode.
2970 * This means we may have to drop the current mmap open first.
2971 *
2972 * N.B. We should have an open for the mmap, because mmap was
2973 * called on an open descriptor, or we created an open for read
2974 * when reading the first page for execve. However, if we
2975 * piggybacked on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2976 * open, that open may have since closed.
2977 */
2978
2979 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2980 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2981 /* We shouldn't get here. We've already opened the file for execve */
2982 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2983 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
2984 }
2985 /*
2986 * Mappings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
2987 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
2988 */
2989 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
2990 /* not asking for just read access -> fail */
2991 error = EPERM;
2992 goto out;
2993 }
2994 /* we don't have the file open, so open it for read access */
2995 if (nmp->nm_vers < NFS_VER4) {
2996 /* NFS v2/v3 opens are always allowed - so just add it. */
2997 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
2998 error = 0;
2999 } else {
3000 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
3001 }
3002 if (!error) {
3003 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
3004 }
3005 if (error) {
3006 goto out;
3007 }
3008 }
3009
3010 /* determine deny mode for open */
3011 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3012 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3013 delegated = 1;
3014 if (nofp->nof_d_rw) {
3015 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3016 } else if (nofp->nof_d_rw_dw) {
3017 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3018 } else if (nofp->nof_d_rw_drw) {
3019 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3020 }
3021 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3022 delegated = 0;
3023 if (nofp->nof_rw) {
3024 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3025 } else if (nofp->nof_rw_dw) {
3026 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3027 } else if (nofp->nof_rw_drw) {
3028 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3029 }
3030 } else {
3031 error = EPERM;
3032 }
3033 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3034 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3035 delegated = 1;
3036 if (nofp->nof_d_r) {
3037 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3038 } else if (nofp->nof_d_r_dw) {
3039 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3040 } else if (nofp->nof_d_r_drw) {
3041 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3042 }
3043 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3044 delegated = 0;
3045 if (nofp->nof_r) {
3046 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3047 } else if (nofp->nof_r_dw) {
3048 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3049 } else if (nofp->nof_r_drw) {
3050 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3051 }
3052 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3053 /*
3054 * This clause and the one below co-opt a read/write access
3055 * for a read-only mapping. We probably got here because an
3056 * existing read/write open for an executable file already exists.
3057 */
3058 delegated = 1;
3059 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3060 if (nofp->nof_d_rw) {
3061 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3062 } else if (nofp->nof_d_rw_dw) {
3063 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3064 } else if (nofp->nof_d_rw_drw) {
3065 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3066 }
3067 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3068 delegated = 0;
3069 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3070 if (nofp->nof_rw) {
3071 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3072 } else if (nofp->nof_rw_dw) {
3073 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3074 } else if (nofp->nof_rw_drw) {
3075 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3076 }
3077 } else {
3078 error = EPERM;
3079 }
3080 }
3081 if (error) { /* mmap mode without proper open mode */
3082 goto out;
3083 }
3084
3085 /*
3086 * If the existing mmap access is more than the new access OR the
3087 * existing access is the same and the existing deny mode is no greater,
3088 * then we'll stick with the existing mmap open mode.
3089 */
3090 if ((nofp->nof_mmap_access > accessMode) ||
3091 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
3092 goto out;
3093 }
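/*
 * The comparison above relies on the numeric ordering of the share-mode
 * constants (READ < WRITE < BOTH, and likewise for the deny modes), so a
 * plain integer compare suffices to pick the stronger mode.
 */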
3094
3095 /* update mmap open mode */
3096 if (nofp->nof_mmap_access) {
3097 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3098 if (error) {
3099 if (!nfs_mount_state_error_should_restart(error)) {
3100 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3101 }
3102 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3103 goto out;
3104 }
3105 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3106 }
3107
3108 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3109 nofp->nof_mmap_access = accessMode;
3110 nofp->nof_mmap_deny = denyMode;
3111
3112 out:
3113 if (nofp) {
3114 nfs_open_file_clear_busy(nofp);
3115 }
3116 if (nfs_mount_state_in_use_end(nmp, error)) {
3117 nofp = NULL;
3118 goto restart;
3119 }
3120 if (noop) {
3121 nfs_open_owner_rele(noop);
3122 }
3123
3124 if (!error) {
3125 int ismapped = 0;
3126 nfs_node_lock_force(np);
3127 if ((np->n_flag & NISMAPPED) == 0) {
3128 np->n_flag |= NISMAPPED;
3129 ismapped = 1;
3130 }
3131 nfs_node_unlock(np);
3132 if (ismapped) {
3133 lck_mtx_lock(&nmp->nm_lock);
3134 nmp->nm_state &= ~NFSSTA_SQUISHY;
3135 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
3136 if (nmp->nm_curdeadtimeout <= 0) {
3137 nmp->nm_deadto_start = 0;
3138 }
3139 nmp->nm_mappers++;
3140 lck_mtx_unlock(&nmp->nm_lock);
3141 }
3142 }
3143
3144 return error;
3145 }
3146
3147
3148 int
3149 nfs_vnop_mnomap(
3150 struct vnop_mnomap_args /* {
3151 * struct vnodeop_desc *a_desc;
3152 * vnode_t a_vp;
3153 * vfs_context_t a_context;
3154 * } */*ap)
3155 {
3156 vfs_context_t ctx = ap->a_context;
3157 vnode_t vp = ap->a_vp;
3158 nfsnode_t np = VTONFS(vp);
3159 struct nfsmount *nmp;
3160 struct nfs_open_file *nofp = NULL;
3161 off_t size;
3162 int error;
3163 int is_mapped_flag = 0;
3164
3165 nmp = VTONMP(vp);
3166 if (nfs_mount_gone(nmp)) {
3167 return ENXIO;
3168 }
3169
3170 nfs_node_lock_force(np);
3171 if (np->n_flag & NISMAPPED) {
3172 is_mapped_flag = 1;
3173 np->n_flag &= ~NISMAPPED;
3174 }
3175 nfs_node_unlock(np);
3176 if (is_mapped_flag) {
3177 lck_mtx_lock(&nmp->nm_lock);
3178 if (nmp->nm_mappers) {
3179 nmp->nm_mappers--;
3180 } else {
3181 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3182 }
3183 lck_mtx_unlock(&nmp->nm_lock);
3184 }
3185
3186 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3187 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
3188 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
3189 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
3190 }
3191
3192 /* walk all open files and close all mmap opens */
3193 loop:
3194 error = nfs_mount_state_in_use_start(nmp, NULL);
3195 if (error) {
3196 return error;
3197 }
3198 lck_mtx_lock(&np->n_openlock);
3199 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
3200 if (!nofp->nof_mmap_access) {
3201 continue;
3202 }
3203 lck_mtx_unlock(&np->n_openlock);
3204 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3205 nfs_mount_state_in_use_end(nmp, 0);
3206 error = nfs4_reopen(nofp, NULL);
3207 if (!error) {
3208 goto loop;
3209 }
3210 }
3211 if (!error) {
3212 error = nfs_open_file_set_busy(nofp, NULL);
3213 }
3214 if (error) {
3215 lck_mtx_lock(&np->n_openlock);
3216 break;
3217 }
3218 if (nofp->nof_mmap_access) {
3219 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3220 if (!nfs_mount_state_error_should_restart(error)) {
3221 if (error) { /* not a state-operation-restarting error, so just clear the access */
3222 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3223 }
3224 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3225 }
3226 if (error) {
3227 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3228 }
3229 }
3230 nfs_open_file_clear_busy(nofp);
3231 nfs_mount_state_in_use_end(nmp, error);
3232 goto loop;
3233 }
3234 lck_mtx_unlock(&np->n_openlock);
3235 nfs_mount_state_in_use_end(nmp, error);
3236 return error;
3237 }
3238
3239 /*
3240 * Search a node's lock owner list for the owner for this process.
3241 * If not found and "alloc" is set, then allocate a new one.
3242 */
3243 struct nfs_lock_owner *
3244 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3245 {
3246 pid_t pid = proc_pid(p);
3247 struct nfs_lock_owner *nlop, *newnlop = NULL;
3248
3249 tryagain:
3250 lck_mtx_lock(&np->n_openlock);
3251 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3252 os_ref_count_t newcount;
3253
3254 if (nlop->nlo_pid != pid) {
3255 continue;
3256 }
3257 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
3258 break;
3259 }
3260 /* stale lock owner... reuse it if we can */
3261 if (os_ref_get_count(&nlop->nlo_refcnt)) {
3262 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3263 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3264 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3265 lck_mtx_unlock(&np->n_openlock);
3266 goto tryagain;
3267 }
3268 nlop->nlo_pid_start = p->p_start;
3269 nlop->nlo_seqid = 0;
3270 nlop->nlo_stategenid = 0;
3271 break;
3272 }
3273
3274 if (!nlop && !newnlop && alloc) {
3275 lck_mtx_unlock(&np->n_openlock);
3276 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3277 if (!newnlop) {
3278 return NULL;
3279 }
3280 bzero(newnlop, sizeof(*newnlop));
3281 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3282 newnlop->nlo_pid = pid;
3283 newnlop->nlo_pid_start = p->p_start;
3284 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3285 TAILQ_INIT(&newnlop->nlo_locks);
3286 goto tryagain;
3287 }
3288 if (!nlop && newnlop) {
3289 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3290 os_ref_init(&newnlop->nlo_refcnt, NULL);
3291 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3292 nlop = newnlop;
3293 }
3294 lck_mtx_unlock(&np->n_openlock);
3295
3296 if (newnlop && (nlop != newnlop)) {
3297 nfs_lock_owner_destroy(newnlop);
3298 }
3299
3300 if (nlop) {
3301 nfs_lock_owner_ref(nlop);
3302 }
3303
3304 return nlop;
3305 }
3306
3307 /*
3308 * destroy a lock owner that's no longer needed
3309 */
3310 void
3311 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3312 {
3313 if (nlop->nlo_open_owner) {
3314 nfs_open_owner_rele(nlop->nlo_open_owner);
3315 nlop->nlo_open_owner = NULL;
3316 }
3317 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3318 FREE(nlop, M_TEMP);
3319 }
3320
3321 /*
3322 * acquire a reference count on a lock owner
3323 */
3324 void
3325 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3326 {
3327 lck_mtx_lock(&nlop->nlo_lock);
3328 os_ref_retain_locked(&nlop->nlo_refcnt);
3329 lck_mtx_unlock(&nlop->nlo_lock);
3330 }
3331
3332 /*
3333 * drop a reference count on a lock owner and destroy it if
3334 * it is no longer referenced and no longer on the mount's list.
3335 */
3336 void
3337 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3338 {
3339 os_ref_count_t newcount;
3340
3341 lck_mtx_lock(&nlop->nlo_lock);
3342 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
3343 panic("nfs_lock_owner_rele: no refcnt");
3344 }
3345 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3346 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3347 panic("nfs_lock_owner_rele: busy");
3348 }
3349 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3350 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3351 lck_mtx_unlock(&nlop->nlo_lock);
3352 return;
3353 }
3354 /* owner is no longer referenced or linked to mount, so destroy it */
3355 lck_mtx_unlock(&nlop->nlo_lock);
3356 nfs_lock_owner_destroy(nlop);
3357 }
3358
3359 /*
3360 * Mark a lock owner as busy because we are about to
3361 * start an operation that uses and updates lock owner state.
3362 */
3363 int
3364 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3365 {
3366 struct nfsmount *nmp;
3367 struct timespec ts = {2, 0};
3368 int error = 0, slpflag;
3369
3370 nmp = nlop->nlo_open_owner->noo_mount;
3371 if (nfs_mount_gone(nmp)) {
3372 return ENXIO;
3373 }
3374 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3375
3376 lck_mtx_lock(&nlop->nlo_lock);
3377 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3378 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
3379 break;
3380 }
3381 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3382 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3383 slpflag = 0;
3384 }
3385 if (!error) {
3386 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3387 }
3388 lck_mtx_unlock(&nlop->nlo_lock);
3389
3390 return error;
3391 }
3392
3393 /*
3394 * Clear the busy flag on a lock owner and wake up anyone waiting
3395 * to mark it busy.
3396 */
3397 void
3398 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3399 {
3400 int wanted;
3401
3402 lck_mtx_lock(&nlop->nlo_lock);
3403 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3404 panic("nfs_lock_owner_clear_busy");
3405 }
3406 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3407 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3408 lck_mtx_unlock(&nlop->nlo_lock);
3409 if (wanted) {
3410 wakeup(nlop);
3411 }
3412 }
3413
3414 /*
3415 * Insert a held lock into a lock owner's sorted list.
3416 * (flock locks are always inserted at the head of the list)
3417 */
3418 void
3419 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3420 {
3421 struct nfs_file_lock *nflp;
3422
3423 /* insert new lock in lock owner's held lock list */
3424 lck_mtx_lock(&nlop->nlo_lock);
3425 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3426 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3427 } else {
3428 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3429 if (newnflp->nfl_start < nflp->nfl_start) {
3430 break;
3431 }
3432 }
3433 if (nflp) {
3434 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3435 } else {
3436 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3437 }
3438 }
3439 lck_mtx_unlock(&nlop->nlo_lock);
3440 }
3441
3442 /*
3443 * Get a file lock structure for this lock owner.
3444 */
3445 struct nfs_file_lock *
3446 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3447 {
3448 struct nfs_file_lock *nflp = NULL;
3449
3450 lck_mtx_lock(&nlop->nlo_lock);
3451 if (!nlop->nlo_alock.nfl_owner) {
3452 nflp = &nlop->nlo_alock;
3453 nflp->nfl_owner = nlop;
3454 }
3455 lck_mtx_unlock(&nlop->nlo_lock);
3456 if (!nflp) {
3457 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3458 if (!nflp) {
3459 return NULL;
3460 }
3461 bzero(nflp, sizeof(*nflp));
3462 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3463 nflp->nfl_owner = nlop;
3464 }
3465 nfs_lock_owner_ref(nlop);
3466 return nflp;
3467 }
3468
3469 /*
3470 * destroy the given NFS file lock structure
3471 */
3472 void
3473 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3474 {
3475 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3476
3477 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3478 nflp->nfl_owner = NULL;
3479 FREE(nflp, M_TEMP);
3480 } else {
3481 lck_mtx_lock(&nlop->nlo_lock);
3482 bzero(nflp, sizeof(*nflp));
3483 lck_mtx_unlock(&nlop->nlo_lock);
3484 }
3485 nfs_lock_owner_rele(nlop);
3486 }
3487
3488 /*
3489 * Check if one file lock conflicts with another.
3490 * (nflp1 is the new lock. nflp2 is the existing lock.)
3491 */
3492 int
3493 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3494 {
3495 /* no conflict if lock is dead */
3496 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3497 return 0;
3498 }
3499 /* no conflict if it's ours - unless the lock style doesn't match */
3500 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3501 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3502 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3503 (nflp1->nfl_start > nflp2->nfl_start) &&
3504 (nflp1->nfl_end < nflp2->nfl_end)) {
3505 *willsplit = 1;
3506 }
3507 return 0;
3508 }
3509 /* no conflict if ranges don't overlap */
3510 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3511 return 0;
3512 }
3513 /* no conflict if neither lock is exclusive */
3514 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3515 return 0;
3516 }
3517 /* conflict */
3518 return 1;
3519 }
3520
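/*
 * Illustrative sketch (compiled out): two overlapping ranges from
 * different owners conflict once either lock is exclusive. The stack
 * variables are hypothetical and left mostly uninitialized; only the
 * fields the predicate reads are set.
 */
#if 0
	struct nfs_lock_owner owner1, owner2;
	struct nfs_file_lock a = { .nfl_owner = &owner1, .nfl_start = 0,
	    .nfl_end = 99, .nfl_type = F_WRLCK };
	struct nfs_file_lock b = { .nfl_owner = &owner2, .nfl_start = 50,
	    .nfl_end = 149, .nfl_type = F_RDLCK };

	/* ranges [0,99] and [50,149] overlap and "a" is exclusive... */
	int c1 = nfs_file_lock_conflict(&a, &b, NULL);	/* c1 == 1 */
	/* ...but two shared (read) locks never conflict */
	a.nfl_type = F_RDLCK;
	int c2 = nfs_file_lock_conflict(&a, &b, NULL);	/* c2 == 0 */
#endif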
3521 /*
3522 * Send an NFSv4 LOCK RPC to the server.
3523 */
3524 int
3525 nfs4_setlock_rpc(
3526 nfsnode_t np,
3527 struct nfs_open_file *nofp,
3528 struct nfs_file_lock *nflp,
3529 int reclaim,
3530 int flags,
3531 thread_t thd,
3532 kauth_cred_t cred)
3533 {
3534 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3535 struct nfsmount *nmp;
3536 struct nfsm_chain nmreq, nmrep;
3537 uint64_t xid;
3538 uint32_t locktype;
3539 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3540 struct nfsreq_secinfo_args si;
3541
3542 nmp = NFSTONMP(np);
3543 if (nfs_mount_gone(nmp)) {
3544 return ENXIO;
3545 }
3546 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3547 return EINVAL;
3548 }
3549
3550 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3551 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3552 ((nflp->nfl_type == F_WRLCK) ?
3553 NFS_LOCK_TYPE_WRITEW :
3554 NFS_LOCK_TYPE_READW) :
3555 ((nflp->nfl_type == F_WRLCK) ?
3556 NFS_LOCK_TYPE_WRITE :
3557 NFS_LOCK_TYPE_READ);
3558 if (newlocker) {
3559 error = nfs_open_file_set_busy(nofp, thd);
3560 if (error) {
3561 return error;
3562 }
3563 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3564 if (error) {
3565 nfs_open_file_clear_busy(nofp);
3566 return error;
3567 }
3568 if (!nlop->nlo_open_owner) {
3569 nfs_open_owner_ref(nofp->nof_owner);
3570 nlop->nlo_open_owner = nofp->nof_owner;
3571 }
3572 }
3573 error = nfs_lock_owner_set_busy(nlop, thd);
3574 if (error) {
3575 if (newlocker) {
3576 nfs_open_owner_clear_busy(nofp->nof_owner);
3577 nfs_open_file_clear_busy(nofp);
3578 }
3579 return error;
3580 }
3581
3582 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3583 nfsm_chain_null(&nmreq);
3584 nfsm_chain_null(&nmrep);
3585
3586 // PUTFH, GETATTR, LOCK
3587 numops = 3;
3588 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3589 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3590 numops--;
3591 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3592 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3593 numops--;
3594 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3595 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3596 numops--;
3597 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3598 nfsm_chain_add_32(error, &nmreq, locktype);
3599 nfsm_chain_add_32(error, &nmreq, reclaim);
3600 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3601 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3602 nfsm_chain_add_32(error, &nmreq, newlocker);
3603 if (newlocker) {
3604 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3605 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3606 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3607 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3608 } else {
3609 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3610 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3611 }
3612 nfsm_chain_build_done(error, &nmreq);
3613 nfsm_assert(error, (numops == 0), EPROTO);
3614 nfsmout_if(error);
3615
3616 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3617
3618 if ((lockerror = nfs_node_lock(np))) {
3619 error = lockerror;
3620 }
3621 nfsm_chain_skip_tag(error, &nmrep);
3622 nfsm_chain_get_32(error, &nmrep, numops);
3623 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3624 nfsmout_if(error);
3625 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3626 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3627 nfsmout_if(error);
3628 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3629 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3630 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3631
3632 /* Update the lock owner's stategenid once it appears the server has state for it. */
3633 /* We determine this by noting the request was successful (we got a stateid). */
3634 if (newlocker && !error) {
3635 nlop->nlo_stategenid = nmp->nm_stategenid;
3636 }
3637 nfsmout:
3638 if (!lockerror) {
3639 nfs_node_unlock(np);
3640 }
3641 nfs_lock_owner_clear_busy(nlop);
3642 if (newlocker) {
3643 nfs_open_owner_clear_busy(nofp->nof_owner);
3644 nfs_open_file_clear_busy(nofp);
3645 }
3646 nfsm_chain_cleanup(&nmreq);
3647 nfsm_chain_cleanup(&nmrep);
3648 return error;
3649 }
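
/*
 * For reference, the LOCK arguments encoded above follow the RFC 7530
 * layout: locktype, reclaim, offset, length, new_lock_owner; then either
 * (open_seqid, open_stateid, lock_seqid, lock_owner4) when introducing a
 * new lock owner, or (lock_stateid, lock_seqid) for one the server
 * already has state for.
 */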
3650
3651 /*
3652 * Send an NFSv4 LOCKU RPC to the server.
3653 */
3654 int
3655 nfs4_unlock_rpc(
3656 nfsnode_t np,
3657 struct nfs_lock_owner *nlop,
3658 int type,
3659 uint64_t start,
3660 uint64_t end,
3661 int flags,
3662 thread_t thd,
3663 kauth_cred_t cred)
3664 {
3665 struct nfsmount *nmp;
3666 struct nfsm_chain nmreq, nmrep;
3667 uint64_t xid;
3668 int error = 0, lockerror = ENOENT, numops, status;
3669 struct nfsreq_secinfo_args si;
3670
3671 nmp = NFSTONMP(np);
3672 if (nfs_mount_gone(nmp)) {
3673 return ENXIO;
3674 }
3675 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3676 return EINVAL;
3677 }
3678
3679 error = nfs_lock_owner_set_busy(nlop, NULL);
3680 if (error) {
3681 return error;
3682 }
3683
3684 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3685 nfsm_chain_null(&nmreq);
3686 nfsm_chain_null(&nmrep);
3687
3688 // PUTFH, GETATTR, LOCKU
3689 numops = 3;
3690 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3691 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3692 numops--;
3693 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3694 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3695 numops--;
3696 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3697 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3698 numops--;
3699 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3700 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3701 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3702 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3703 nfsm_chain_add_64(error, &nmreq, start);
3704 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3705 nfsm_chain_build_done(error, &nmreq);
3706 nfsm_assert(error, (numops == 0), EPROTO);
3707 nfsmout_if(error);
3708
3709 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3710
3711 if ((lockerror = nfs_node_lock(np))) {
3712 error = lockerror;
3713 }
3714 nfsm_chain_skip_tag(error, &nmrep);
3715 nfsm_chain_get_32(error, &nmrep, numops);
3716 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3717 nfsmout_if(error);
3718 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3719 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3720 nfsmout_if(error);
3721 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3722 nfs_owner_seqid_increment(NULL, nlop, error);
3723 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3724 nfsmout:
3725 if (!lockerror) {
3726 nfs_node_unlock(np);
3727 }
3728 nfs_lock_owner_clear_busy(nlop);
3729 nfsm_chain_cleanup(&nmreq);
3730 nfsm_chain_cleanup(&nmrep);
3731 return error;
3732 }
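
/*
 * Illustrative sketch (an assumption about NFS_LOCK_LENGTH, not its
 * actual definition): LOCK/LOCKU encode the inclusive [start, end] range
 * kept in nfs_file_lock as an offset+length pair, where an end of
 * UINT64_MAX ("to end of file") maps to the protocol's all-ones length
 * -- the inverse of the LOCKT reply handling in nfs4_getlock_rpc below.
 */
#if 0 /* example only, not part of the build */
#include <stdint.h>

static uint64_t
lock_length(uint64_t start, uint64_t end)
{
	/* all-ones length means "from offset to end of file" on the wire */
	return (end == UINT64_MAX) ? UINT64_MAX : (end - start + 1);
}
#endif /* example only */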
3733
3734 /*
3735 * Send an NFSv4 LOCKT RPC to the server.
3736 */
3737 int
3738 nfs4_getlock_rpc(
3739 nfsnode_t np,
3740 struct nfs_lock_owner *nlop,
3741 struct flock *fl,
3742 uint64_t start,
3743 uint64_t end,
3744 vfs_context_t ctx)
3745 {
3746 struct nfsmount *nmp;
3747 struct nfsm_chain nmreq, nmrep;
3748 uint64_t xid, val64 = 0;
3749 uint32_t val = 0;
3750 int error = 0, lockerror, numops, status;
3751 struct nfsreq_secinfo_args si;
3752
3753 nmp = NFSTONMP(np);
3754 if (nfs_mount_gone(nmp)) {
3755 return ENXIO;
3756 }
3757 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3758 return EINVAL;
3759 }
3760
3761 lockerror = ENOENT;
3762 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3763 nfsm_chain_null(&nmreq);
3764 nfsm_chain_null(&nmrep);
3765
3766 // PUTFH, GETATTR, LOCKT
3767 numops = 3;
3768 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3769 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3770 numops--;
3771 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3772 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3773 numops--;
3774 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3775 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3776 numops--;
3777 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3778 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3779 nfsm_chain_add_64(error, &nmreq, start);
3780 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3781 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3782 nfsm_chain_build_done(error, &nmreq);
3783 nfsm_assert(error, (numops == 0), EPROTO);
3784 nfsmout_if(error);
3785
3786 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3787
3788 if ((lockerror = nfs_node_lock(np))) {
3789 error = lockerror;
3790 }
3791 nfsm_chain_skip_tag(error, &nmrep);
3792 nfsm_chain_get_32(error, &nmrep, numops);
3793 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3794 nfsmout_if(error);
3795 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3796 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3797 nfsmout_if(error);
3798 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3799 if (error == NFSERR_DENIED) {
3800 error = 0;
3801 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3802 nfsm_chain_get_64(error, &nmrep, val64);
3803 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3804 nfsm_chain_get_32(error, &nmrep, val);
3805 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3806 fl->l_pid = 0;
3807 fl->l_whence = SEEK_SET;
3808 } else if (!error) {
3809 fl->l_type = F_UNLCK;
3810 }
3811 nfsmout:
3812 if (!lockerror) {
3813 nfs_node_unlock(np);
3814 }
3815 nfsm_chain_cleanup(&nmreq);
3816 nfsm_chain_cleanup(&nmrep);
3817 return error;
3818 }
3819
3820
3821 /*
3822 * Check for any conflicts with the given lock.
3823 *
3824 * Checking for a lock doesn't require the file to be opened.
3825 * So we skip all the open owner, open file, lock owner work
3826 * and just check for a conflicting lock.
3827 */
3828 int
3829 nfs_advlock_getlock(
3830 nfsnode_t np,
3831 struct nfs_lock_owner *nlop,
3832 struct flock *fl,
3833 uint64_t start,
3834 uint64_t end,
3835 vfs_context_t ctx)
3836 {
3837 struct nfsmount *nmp;
3838 struct nfs_file_lock *nflp;
3839 int error = 0, answered = 0;
3840
3841 nmp = NFSTONMP(np);
3842 if (nfs_mount_gone(nmp)) {
3843 return ENXIO;
3844 }
3845
3846 restart:
3847 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3848 return error;
3849 }
3850
3851 lck_mtx_lock(&np->n_openlock);
3852 /* scan currently held locks for conflict */
3853 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3854 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
3855 continue;
3856 }
3857 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3858 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
3859 break;
3860 }
3861 }
3862 if (nflp) {
3863 /* found a conflicting lock */
3864 fl->l_type = nflp->nfl_type;
3865 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3866 fl->l_start = nflp->nfl_start;
3867 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3868 fl->l_whence = SEEK_SET;
3869 answered = 1;
3870 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3871 /*
3872 * If we have a write delegation, we know there can't be other
3873 * locks on the server. So the answer is no conflicting lock found.
3874 */
3875 fl->l_type = F_UNLCK;
3876 answered = 1;
3877 }
3878 lck_mtx_unlock(&np->n_openlock);
3879 if (answered) {
3880 nfs_mount_state_in_use_end(nmp, 0);
3881 return 0;
3882 }
3883
3884 /* no conflict found locally, so ask the server */
3885 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3886
3887 if (nfs_mount_state_in_use_end(nmp, error)) {
3888 goto restart;
3889 }
3890 return error;
3891 }
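
/*
 * Illustrative sketch (user-space view, not part of the kernel build):
 * F_GETLK reports the first conflicting lock, or sets l_type to F_UNLCK
 * when the whole range is free -- the two answers produced above.
 */
#if 0 /* example only, not part of the build */
#include <fcntl.h>
#include <stdio.h>

static void
probe(int fd)
{
	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
	    .l_len = 0 /* to end of file */, .l_type = F_WRLCK };

	if (fcntl(fd, F_GETLK, &fl) == 0) {
		if (fl.l_type == F_UNLCK) {
			printf("range is free\n");
		} else {
			printf("conflict: pid %d holds [%lld, len %lld]\n",
			    (int)fl.l_pid, (long long)fl.l_start,
			    (long long)fl.l_len);
		}
	}
}
#endif /* example only */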
3892
3893 /*
3894 * Acquire a file lock for the given range.
3895 *
3896 * Add the lock (request) to the lock queue.
3897 * Scan the lock queue for any conflicting locks.
3898 * If a conflict is found, block or return an error.
3899 * Once end of queue is reached, send request to the server.
3900 * If the server grants the lock, scan the lock queue and
3901 * update any existing locks. Then (optionally) scan the
3902 * queue again to coalesce any locks adjacent to the new one.
3903 */
3904 int
3905 nfs_advlock_setlock(
3906 nfsnode_t np,
3907 struct nfs_open_file *nofp,
3908 struct nfs_lock_owner *nlop,
3909 int op,
3910 uint64_t start,
3911 uint64_t end,
3912 int style,
3913 short type,
3914 vfs_context_t ctx)
3915 {
3916 struct nfsmount *nmp;
3917 struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
3918 struct nfs_file_lock *coalnflp;
3919 int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
3920 struct timespec ts = {1, 0};
3921
3922 nmp = NFSTONMP(np);
3923 if (nfs_mount_gone(nmp)) {
3924 return ENXIO;
3925 }
3926 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
3927
3928 if ((type != F_RDLCK) && (type != F_WRLCK)) {
3929 return EINVAL;
3930 }
3931
3932 /* allocate a new lock */
3933 newnflp = nfs_file_lock_alloc(nlop);
3934 if (!newnflp) {
3935 return ENOLCK;
3936 }
3937 newnflp->nfl_start = start;
3938 newnflp->nfl_end = end;
3939 newnflp->nfl_type = type;
3940 if (op == F_SETLKW) {
3941 newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
3942 }
3943 newnflp->nfl_flags |= style;
3944 newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;
3945
3946 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
3947 /*
3948 * For exclusive flock-style locks, if we block waiting for the
3949 * lock, we need to first release any currently held shared
3950 * flock-style lock. So, the first thing we do is check if we
3951 * have a shared flock-style lock.
3952 */
3953 nflp = TAILQ_FIRST(&nlop->nlo_locks);
3954 if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
3955 nflp = NULL;
3956 }
3957 if (nflp && (nflp->nfl_type != F_RDLCK)) {
3958 nflp = NULL;
3959 }
3960 flocknflp = nflp;
3961 }
3962
3963 restart:
3964 restart = 0;
3965 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
3966 if (error) {
3967 goto error_out;
3968 }
3969 inuse = 1;
3970 if (np->n_flag & NREVOKE) {
3971 error = EIO;
3972 nfs_mount_state_in_use_end(nmp, 0);
3973 inuse = 0;
3974 goto error_out;
3975 }
3976 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3977 nfs_mount_state_in_use_end(nmp, 0);
3978 inuse = 0;
3979 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
3980 if (error) {
3981 goto error_out;
3982 }
3983 goto restart;
3984 }
3985
3986 lck_mtx_lock(&np->n_openlock);
3987 if (!inqueue) {
3988 /* insert new lock at beginning of list */
3989 TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
3990 inqueue = 1;
3991 }
3992
3993 /* scan current list of locks (held and pending) for conflicts */
3994 for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
3995 nextnflp = TAILQ_NEXT(nflp, nfl_link);
3996 if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
3997 continue;
3998 }
3999 /* Conflict */
4000 if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4001 error = EAGAIN;
4002 break;
4003 }
4004 /* Block until this lock is no longer held. */
4005 if (nflp->nfl_blockcnt == UINT_MAX) {
4006 error = ENOLCK;
4007 break;
4008 }
4009 nflp->nfl_blockcnt++;
4010 do {
4011 if (flocknflp) {
4012 /* release any currently held shared lock before sleeping */
4013 lck_mtx_unlock(&np->n_openlock);
4014 nfs_mount_state_in_use_end(nmp, 0);
4015 inuse = 0;
4016 error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4017 flocknflp = NULL;
4018 if (!error) {
4019 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4020 }
4021 if (error) {
4022 lck_mtx_lock(&np->n_openlock);
4023 break;
4024 }
4025 inuse = 1;
4026 lck_mtx_lock(&np->n_openlock);
4027 /* no need to block/sleep if the conflict is gone */
4028 if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
4029 break;
4030 }
4031 }
4032 msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
4033 slpflag = 0;
4034 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4035 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4036 /* looks like we have a recover pending... restart */
4037 restart = 1;
4038 lck_mtx_unlock(&np->n_openlock);
4039 nfs_mount_state_in_use_end(nmp, 0);
4040 inuse = 0;
4041 lck_mtx_lock(&np->n_openlock);
4042 break;
4043 }
4044 if (!error && (np->n_flag & NREVOKE)) {
4045 error = EIO;
4046 }
4047 } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
4048 nflp->nfl_blockcnt--;
4049 if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
4050 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4051 nfs_file_lock_destroy(nflp);
4052 }
4053 if (error || restart) {
4054 break;
4055 }
4056 /* We have released n_openlock, so we can't trust that nextnflp is still valid. */
4057 /* So, restart this lock-scanning loop from just after the new lock. */
4058 nextnflp = TAILQ_NEXT(newnflp, nfl_link);
4059 }
4060 lck_mtx_unlock(&np->n_openlock);
4061 if (restart) {
4062 goto restart;
4063 }
4064 if (error) {
4065 goto error_out;
4066 }
4067
4068 if (willsplit) {
4069 /*
4070 * It looks like this operation is splitting a lock.
4071 * We allocate a new lock now so we don't have to worry
4072 * about the allocation failing after we've updated some state.
4073 */
4074 nflp2 = nfs_file_lock_alloc(nlop);
4075 if (!nflp2) {
4076 error = ENOLCK;
4077 goto error_out;
4078 }
4079 }
4080
4081 /* once scan for local conflicts is clear, send request to server */
4082 if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
4083 goto error_out;
4084 }
4085 busy = 1;
4086 delay = 0;
4087 do {
4088 /* do we have a delegation? (that we're not returning?) */
4089 if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
4090 if (np->n_openflags & N_DELEG_WRITE) {
4091 /* with a write delegation, just take the lock delegated */
4092 newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
4093 error = 0;
4094 /* make sure the lock owner knows its open owner */
4095 if (!nlop->nlo_open_owner) {
4096 nfs_open_owner_ref(nofp->nof_owner);
4097 nlop->nlo_open_owner = nofp->nof_owner;
4098 }
4099 break;
4100 } else {
4101 /*
4102 * If we don't have any non-delegated opens but we do have
4103 * delegated opens, then we need to first claim the delegated
4104 * opens so that the lock request on the server can be associated
4105 * with an open it knows about.
4106 */
4107 if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
4108 !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
4109 !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
4110 (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
4111 nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
4112 nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
4113 error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
4114 if (error) {
4115 break;
4116 }
4117 }
4118 }
4119 }
4120 if (np->n_flag & NREVOKE) {
4121 error = EIO;
4122 }
4123 if (!error) {
4124 error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
4125 }
4126 if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
4127 break;
4128 }
4129 /* request was denied due to either conflict or grace period */
4130 if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
4131 error = EAGAIN;
4132 break;
4133 }
4134 if (flocknflp) {
4135 /* release any currently held shared lock before sleeping */
4136 nfs_open_state_clear_busy(np);
4137 busy = 0;
4138 nfs_mount_state_in_use_end(nmp, 0);
4139 inuse = 0;
4140 error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
4141 flocknflp = NULL;
4142 if (!error2) {
4143 error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
4144 }
4145 if (!error2) {
4146 inuse = 1;
4147 error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
4148 }
4149 if (error2) {
4150 error = error2;
4151 break;
4152 }
4153 busy = 1;
4154 }
4155 /*
4156 * Wait a little bit and send the request again.
4157 * Except for retries of blocked v2/v3 requests, where we've already waited a bit.
4158 */
4159 if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
4160 if (error == NFSERR_GRACE) {
4161 delay = 4;
4162 }
4163 if (delay < 4) {
4164 delay++;
4165 }
4166 tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
4167 slpflag = 0;
4168 }
4169 error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
4170 if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
4171 /* looks like we have a recover pending... restart */
4172 nfs_open_state_clear_busy(np);
4173 busy = 0;
4174 nfs_mount_state_in_use_end(nmp, 0);
4175 inuse = 0;
4176 goto restart;
4177 }
4178 if (!error && (np->n_flag & NREVOKE)) {
4179 error = EIO;
4180 }
4181 } while (!error);
4182
4183 error_out:
4184 if (nfs_mount_state_error_should_restart(error)) {
4185 /* looks like we need to restart this operation */
4186 if (busy) {
4187 nfs_open_state_clear_busy(np);
4188 busy = 0;
4189 }
4190 if (inuse) {
4191 nfs_mount_state_in_use_end(nmp, error);
4192 inuse = 0;
4193 }
4194 goto restart;
4195 }
4196 lck_mtx_lock(&np->n_openlock);
4197 newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
4198 if (error) {
4199 newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4200 if (newnflp->nfl_blockcnt) {
4201 /* wake up anyone blocked on this lock */
4202 wakeup(newnflp);
4203 } else {
4204 /* remove newnflp from lock list and destroy */
4205 if (inqueue) {
4206 TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
4207 }
4208 nfs_file_lock_destroy(newnflp);
4209 }
4210 lck_mtx_unlock(&np->n_openlock);
4211 if (busy) {
4212 nfs_open_state_clear_busy(np);
4213 }
4214 if (inuse) {
4215 nfs_mount_state_in_use_end(nmp, error);
4216 }
4217 if (nflp2) {
4218 nfs_file_lock_destroy(nflp2);
4219 }
4220 return error;
4221 }
4222
4223 /* server granted the lock */
4224
4225 /*
4226 * Scan for locks to update.
4227 *
4228 * Locks completely covered are killed.
4229 * At most two locks may need to be clipped.
4230 * It's possible that a single lock may need to be split.
4231 */
4232 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4233 if (nflp == newnflp) {
4234 continue;
4235 }
4236 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4237 continue;
4238 }
4239 if (nflp->nfl_owner != nlop) {
4240 continue;
4241 }
4242 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
4243 continue;
4244 }
4245 if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
4246 continue;
4247 }
4248 /* here's one to update */
4249 if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
4250 /* The entire lock is being replaced. */
4251 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4252 lck_mtx_lock(&nlop->nlo_lock);
4253 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4254 lck_mtx_unlock(&nlop->nlo_lock);
4255 /* lock will be destroyed below, if no waiters */
4256 } else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
4257 /* We're replacing a range in the middle of a lock. */
4258 /* The current lock will be split into two locks. */
4259 /* Update locks and insert new lock after current lock. */
4260 nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4261 nflp2->nfl_type = nflp->nfl_type;
4262 nflp2->nfl_start = newnflp->nfl_end + 1;
4263 nflp2->nfl_end = nflp->nfl_end;
4264 nflp->nfl_end = newnflp->nfl_start - 1;
4265 TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
4266 nfs_lock_owner_insert_held_lock(nlop, nflp2);
4267 nextnflp = nflp2;
4268 nflp2 = NULL;
4269 } else if (newnflp->nfl_start > nflp->nfl_start) {
4270 /* We're replacing the end of a lock. */
4271 nflp->nfl_end = newnflp->nfl_start - 1;
4272 } else if (newnflp->nfl_end < nflp->nfl_end) {
4273 /* We're replacing the start of a lock. */
4274 nflp->nfl_start = newnflp->nfl_end + 1;
4275 }
4276 if (nflp->nfl_blockcnt) {
4277 /* wake up anyone blocked on this lock */
4278 wakeup(nflp);
4279 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4280 /* remove nflp from lock list and destroy */
4281 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4282 nfs_file_lock_destroy(nflp);
4283 }
4284 }
4285
4286 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4287
4288 /*
4289 * POSIX locks should be coalesced when possible.
4290 */
4291 if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
4292 /*
4293 * Walk through the lock queue and check each of our held locks with
4294 * the previous and next locks in the lock owner's "held lock list".
4295 * If the two locks can be coalesced, we merge the current lock into
4296 * the other (previous or next) lock. Merging this way makes sure that
4297 * lock ranges are always merged forward in the lock queue. This is
4298 * important because anyone blocked on the lock being "merged away"
4299 * will still need to block on that range and it will simply continue
4300 * checking locks that are further down the list.
4301 */
4302 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4303 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4304 continue;
4305 }
4306 if (nflp->nfl_owner != nlop) {
4307 continue;
4308 }
4309 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
4310 continue;
4311 }
4312 if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
4313 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4314 (coalnflp->nfl_type == nflp->nfl_type) &&
4315 (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
4316 coalnflp->nfl_end = nflp->nfl_end;
4317 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4318 lck_mtx_lock(&nlop->nlo_lock);
4319 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4320 lck_mtx_unlock(&nlop->nlo_lock);
4321 } else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4322 ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
4323 (coalnflp->nfl_type == nflp->nfl_type) &&
4324 (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
4325 coalnflp->nfl_start = nflp->nfl_start;
4326 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4327 lck_mtx_lock(&nlop->nlo_lock);
4328 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4329 lck_mtx_unlock(&nlop->nlo_lock);
4330 }
4331 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
4332 continue;
4333 }
4334 if (nflp->nfl_blockcnt) {
4335 /* wake up anyone blocked on this lock */
4336 wakeup(nflp);
4337 } else {
4338 /* remove nflp from lock list and destroy */
4339 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4340 nfs_file_lock_destroy(nflp);
4341 }
4342 }
4343 }
4344
4345 lck_mtx_unlock(&np->n_openlock);
4346 nfs_open_state_clear_busy(np);
4347 nfs_mount_state_in_use_end(nmp, error);
4348
4349 if (nflp2) {
4350 nfs_file_lock_destroy(nflp2);
4351 }
4352 return error;
4353 }
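
/*
 * Illustrative sketch (not from the original source): the coalescing pass
 * above merges a held POSIX lock into an adjacent neighbor of the same
 * type, leaving one range covering both.  Reduced to inclusive ranges:
 */
#if 0 /* example only, not part of the build */
#include <assert.h>
#include <stdint.h>

struct range {
	uint64_t start, end;
};

/* merge cur into nbr when cur immediately precedes nbr; 1 on success */
static int
coalesce(struct range *cur, struct range *nbr)
{
	if (cur->end + 1 != nbr->start) {
		return 0;                /* not adjacent */
	}
	nbr->start = cur->start;         /* the neighbor absorbs cur's range */
	return 1;                        /* caller marks cur dead, as above */
}

int
main(void)
{
	struct range cur = { 0, 99 }, nbr = { 100, 199 };

	assert(coalesce(&cur, &nbr));
	assert(nbr.start == 0 && nbr.end == 199);
	return 0;
}
#endif /* example only */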
4354
4355 /*
4356 * Release all (same style) locks within the given range.
4357 */
4358 int
4359 nfs_advlock_unlock(
4360 nfsnode_t np,
4361 struct nfs_open_file *nofp,
4362 struct nfs_lock_owner *nlop,
4363 uint64_t start,
4364 uint64_t end,
4365 int style,
4366 vfs_context_t ctx)
4367 {
4368 struct nfsmount *nmp;
4369 struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
4370 int error = 0, willsplit = 0, send_unlock_rpcs = 1;
4371
4372 nmp = NFSTONMP(np);
4373 if (nfs_mount_gone(nmp)) {
4374 return ENXIO;
4375 }
4376
4377 restart:
4378 if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
4379 return error;
4380 }
4381 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
4382 nfs_mount_state_in_use_end(nmp, 0);
4383 error = nfs4_reopen(nofp, NULL);
4384 if (error) {
4385 return error;
4386 }
4387 goto restart;
4388 }
4389 if ((error = nfs_open_state_set_busy(np, NULL))) {
4390 nfs_mount_state_in_use_end(nmp, error);
4391 return error;
4392 }
4393
4394 lck_mtx_lock(&np->n_openlock);
4395 if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
4396 /*
4397 * We may need to allocate a new lock if an existing lock gets split.
4398 * So, we first scan the list to check for a split, and if there's
4399 * going to be one, we'll allocate one now.
4400 */
4401 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4402 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4403 continue;
4404 }
4405 if (nflp->nfl_owner != nlop) {
4406 continue;
4407 }
4408 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4409 continue;
4410 }
4411 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4412 continue;
4413 }
4414 if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4415 willsplit = 1;
4416 break;
4417 }
4418 }
4419 if (willsplit) {
4420 lck_mtx_unlock(&np->n_openlock);
4421 nfs_open_state_clear_busy(np);
4422 nfs_mount_state_in_use_end(nmp, 0);
4423 newnflp = nfs_file_lock_alloc(nlop);
4424 if (!newnflp) {
4425 return ENOMEM;
4426 }
4427 goto restart;
4428 }
4429 }
4430
4431 /*
4432 * Free all of our locks in the given range.
4433 *
4434 * Note that this process requires sending requests to the server.
4435 * Because of this, we will release the n_openlock while performing
4436 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
4437 * locks from changing underneath us. However, other entries in the
4438 * list may be removed. So we need to be careful walking the list.
4439 */
4440
4441 /*
4442 * Don't unlock ranges that are held by other-style locks.
4443 * If style is posix, don't send any unlock rpcs if flock is held.
4444 * If we unlock an flock, don't send unlock rpcs for any posix-style
4445 * ranges held - instead send unlocks for the ranges not held.
4446 */
4447 if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
4448 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4449 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
4450 send_unlock_rpcs = 0;
4451 }
4452 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
4453 ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
4454 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
4455 ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
4456 ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
4457 uint64_t s = 0;
4458 int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
4459 int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
4460 while (!delegated && nflp) {
4461 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
4462 /* unlock the range preceding this lock */
4463 lck_mtx_unlock(&np->n_openlock);
4464 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
4465 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4466 if (nfs_mount_state_error_should_restart(error)) {
4467 nfs_open_state_clear_busy(np);
4468 nfs_mount_state_in_use_end(nmp, error);
4469 goto restart;
4470 }
4471 lck_mtx_lock(&np->n_openlock);
4472 if (error) {
4473 goto out;
4474 }
4475 s = nflp->nfl_end + 1;
4476 }
4477 nflp = TAILQ_NEXT(nflp, nfl_lolink);
4478 }
4479 if (!delegated) {
4480 lck_mtx_unlock(&np->n_openlock);
4481 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
4482 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4483 if (nfs_mount_state_error_should_restart(error)) {
4484 nfs_open_state_clear_busy(np);
4485 nfs_mount_state_in_use_end(nmp, error);
4486 goto restart;
4487 }
4488 lck_mtx_lock(&np->n_openlock);
4489 if (error) {
4490 goto out;
4491 }
4492 }
4493 send_unlock_rpcs = 0;
4494 }
4495
4496 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
4497 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
4498 continue;
4499 }
4500 if (nflp->nfl_owner != nlop) {
4501 continue;
4502 }
4503 if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
4504 continue;
4505 }
4506 if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
4507 continue;
4508 }
4509 /* here's one to unlock */
4510 if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
4511 /* The entire lock is being unlocked. */
4512 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4513 lck_mtx_unlock(&np->n_openlock);
4514 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
4515 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4516 if (nfs_mount_state_error_should_restart(error)) {
4517 nfs_open_state_clear_busy(np);
4518 nfs_mount_state_in_use_end(nmp, error);
4519 goto restart;
4520 }
4521 lck_mtx_lock(&np->n_openlock);
4522 }
4523 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4524 if (error) {
4525 break;
4526 }
4527 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
4528 lck_mtx_lock(&nlop->nlo_lock);
4529 TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
4530 lck_mtx_unlock(&nlop->nlo_lock);
4531 /* lock will be destroyed below, if no waiters */
4532 } else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
4533 /* We're unlocking a range in the middle of a lock. */
4534 /* The current lock will be split into two locks. */
4535 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4536 lck_mtx_unlock(&np->n_openlock);
4537 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
4538 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4539 if (nfs_mount_state_error_should_restart(error)) {
4540 nfs_open_state_clear_busy(np);
4541 nfs_mount_state_in_use_end(nmp, error);
4542 goto restart;
4543 }
4544 lck_mtx_lock(&np->n_openlock);
4545 }
4546 if (error) {
4547 break;
4548 }
4549 /* update locks and insert new lock after current lock */
4550 newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
4551 newnflp->nfl_type = nflp->nfl_type;
4552 newnflp->nfl_start = end + 1;
4553 newnflp->nfl_end = nflp->nfl_end;
4554 nflp->nfl_end = start - 1;
4555 TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
4556 nfs_lock_owner_insert_held_lock(nlop, newnflp);
4557 nextnflp = newnflp;
4558 newnflp = NULL;
4559 } else if (start > nflp->nfl_start) {
4560 /* We're unlocking the end of a lock. */
4561 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4562 lck_mtx_unlock(&np->n_openlock);
4563 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
4564 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4565 if (nfs_mount_state_error_should_restart(error)) {
4566 nfs_open_state_clear_busy(np);
4567 nfs_mount_state_in_use_end(nmp, error);
4568 goto restart;
4569 }
4570 lck_mtx_lock(&np->n_openlock);
4571 }
4572 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4573 if (error) {
4574 break;
4575 }
4576 nflp->nfl_end = start - 1;
4577 } else if (end < nflp->nfl_end) {
4578 /* We're unlocking the start of a lock. */
4579 if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
4580 lck_mtx_unlock(&np->n_openlock);
4581 error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
4582 vfs_context_thread(ctx), vfs_context_ucred(ctx));
4583 if (nfs_mount_state_error_should_restart(error)) {
4584 nfs_open_state_clear_busy(np);
4585 nfs_mount_state_in_use_end(nmp, error);
4586 goto restart;
4587 }
4588 lck_mtx_lock(&np->n_openlock);
4589 }
4590 nextnflp = TAILQ_NEXT(nflp, nfl_link);
4591 if (error) {
4592 break;
4593 }
4594 nflp->nfl_start = end + 1;
4595 }
4596 if (nflp->nfl_blockcnt) {
4597 /* wake up anyone blocked on this lock */
4598 wakeup(nflp);
4599 } else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
4600 /* remove nflp from lock list and destroy */
4601 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
4602 nfs_file_lock_destroy(nflp);
4603 }
4604 }
4605 out:
4606 lck_mtx_unlock(&np->n_openlock);
4607 nfs_open_state_clear_busy(np);
4608 nfs_mount_state_in_use_end(nmp, 0);
4609
4610 if (newnflp) {
4611 nfs_file_lock_destroy(newnflp);
4612 }
4613 return error;
4614 }
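
/*
 * Illustrative sketch (not from the original source): unlocking [us, ue]
 * against a held lock [hs, he] of the same owner/style leaves zero, one,
 * or two remnants, mirroring the four cases handled above.
 */
#if 0 /* example only, not part of the build */
#include <assert.h>
#include <stdint.h>

struct range {
	uint64_t start, end;
};

/* returns how many pieces of [hs, he] remain after removing [us, ue] */
static int
range_subtract(uint64_t hs, uint64_t he, uint64_t us, uint64_t ue,
    struct range out[2])
{
	int n = 0;

	if ((us > he) || (ue < hs)) {            /* no overlap: untouched */
		out[n++] = (struct range){ hs, he };
		return n;
	}
	if (us > hs) {                           /* head remnant survives */
		out[n++] = (struct range){ hs, us - 1 };
	}
	if (ue < he) {                           /* tail remnant survives */
		out[n++] = (struct range){ ue + 1, he };
	}
	return n;                                /* 0 == fully unlocked */
}

int
main(void)
{
	struct range r[2];

	assert(range_subtract(0, 199, 50, 99, r) == 2);  /* middle: split */
	assert(r[0].end == 49 && r[1].start == 100);
	assert(range_subtract(0, 199, 0, 199, r) == 0);  /* whole lock gone */
	return 0;
}
#endif /* example only */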
4615
4616 /*
4617 * NFSv4 advisory file locking
4618 */
4619 int
4620 nfs_vnop_advlock(
4621 struct vnop_advlock_args /* {
4622 * struct vnodeop_desc *a_desc;
4623 * vnode_t a_vp;
4624 * caddr_t a_id;
4625 * int a_op;
4626 * struct flock *a_fl;
4627 * int a_flags;
4628 * vfs_context_t a_context;
4629 * } */*ap)
4630 {
4631 vnode_t vp = ap->a_vp;
4632 nfsnode_t np = VTONFS(ap->a_vp);
4633 struct flock *fl = ap->a_fl;
4634 int op = ap->a_op;
4635 int flags = ap->a_flags;
4636 vfs_context_t ctx = ap->a_context;
4637 struct nfsmount *nmp;
4638 struct nfs_open_owner *noop = NULL;
4639 struct nfs_open_file *nofp = NULL;
4640 struct nfs_lock_owner *nlop = NULL;
4641 off_t lstart;
4642 uint64_t start, end;
4643 int error = 0, modified, style;
4644 enum vtype vtype;
4645 #define OFF_MAX QUAD_MAX
4646
4647 nmp = VTONMP(ap->a_vp);
4648 if (nfs_mount_gone(nmp)) {
4649 return ENXIO;
4650 }
4651 lck_mtx_lock(&nmp->nm_lock);
4652 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
4653 lck_mtx_unlock(&nmp->nm_lock);
4654 return ENOTSUP;
4655 }
4656 lck_mtx_unlock(&nmp->nm_lock);
4657
4658 if (np->n_flag & NREVOKE) {
4659 return EIO;
4660 }
4661 vtype = vnode_vtype(ap->a_vp);
4662 if (vtype == VDIR) { /* ignore lock requests on directories */
4663 return 0;
4664 }
4665 if (vtype != VREG) { /* anything other than regular files is invalid */
4666 return EINVAL;
4667 }
4668
4669 /* Convert the flock structure into a start and end. */
4670 switch (fl->l_whence) {
4671 case SEEK_SET:
4672 case SEEK_CUR:
4673 /*
4674 * Caller is responsible for adding any necessary offset
4675 * to fl->l_start when SEEK_CUR is used.
4676 */
4677 lstart = fl->l_start;
4678 break;
4679 case SEEK_END:
4680 /* need to flush and refetch attributes to make */
4681 /* sure we have the correct end-of-file offset */
4682 if ((error = nfs_node_lock(np))) {
4683 return error;
4684 }
4685 modified = (np->n_flag & NMODIFIED);
4686 nfs_node_unlock(np);
4687 if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
4688 return error;
4689 }
4690 if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
4691 return error;
4692 }
4693 nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
4694 if ((np->n_size > OFF_MAX) ||
4695 ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
4696 error = EOVERFLOW;
4697 }
4698 lstart = np->n_size + fl->l_start;
4699 nfs_data_unlock(np);
4700 if (error) {
4701 return error;
4702 }
4703 break;
4704 default:
4705 return EINVAL;
4706 }
4707 if (lstart < 0) {
4708 return EINVAL;
4709 }
4710 start = lstart;
4711 if (fl->l_len == 0) {
4712 end = UINT64_MAX;
4713 } else if (fl->l_len > 0) {
4714 if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
4715 return EOVERFLOW;
4716 }
4717 end = start - 1 + fl->l_len;
4718 } else { /* l_len is negative */
4719 if ((lstart + fl->l_len) < 0) {
4720 return EINVAL;
4721 }
4722 end = start - 1;
4723 start += fl->l_len;
4724 }
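/*
 * Worked examples of the conversion above (illustrative only):
 *   l_start=100, l_len=0   ->  [100, UINT64_MAX]  (to end of file)
 *   l_start=100, l_len=50  ->  [100, 149]         (end = start - 1 + len)
 *   l_start=100, l_len=-50 ->  [50, 99]           (range ends before l_start)
 */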
4725 if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
4726 return EINVAL;
4727 }
4728
4729 style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
4730 if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
4731 return EINVAL;
4732 }
4733
4734 /* find the lock owner, allocating one unless this is an unlock */
4735 nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
4736 if (!nlop) {
4737 error = (op == F_UNLCK) ? 0 : ENOMEM;
4738 if (error) {
4739 NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
4740 }
4741 goto out;
4742 }
4743
4744 if (op == F_GETLK) {
4745 error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
4746 } else {
4747 /* find the open owner */
4748 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
4749 if (!noop) {
4750 NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
4751 error = EPERM;
4752 goto out;
4753 }
4754 /* find the open file */
4755 restart:
4756 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
4757 if (error) {
4758 error = EBADF;
4759 }
4760 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
4761 NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
4762 error = EIO;
4763 }
4764 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4765 error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
4766 nofp = NULL;
4767 if (!error) {
4768 goto restart;
4769 }
4770 }
4771 if (error) {
4772 NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
4773 goto out;
4774 }
4775 if (op == F_UNLCK) {
4776 error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
4777 } else if ((op == F_SETLK) || (op == F_SETLKW)) {
4778 if ((op == F_SETLK) && (flags & F_WAIT)) {
4779 op = F_SETLKW;
4780 }
4781 error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
4782 } else {
4783 /* not getlk, unlock or lock? */
4784 error = EINVAL;
4785 }
4786 }
4787
4788 out:
4789 if (nlop) {
4790 nfs_lock_owner_rele(nlop);
4791 }
4792 if (noop) {
4793 nfs_open_owner_rele(noop);
4794 }
4795 return error;
4796 }
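
/*
 * Illustrative sketch (user-space view, not part of the kernel build):
 * the fcntl(2) byte-range interface that ultimately reaches this vnop on
 * an NFS mount.  The path name is hypothetical.
 */
#if 0 /* example only, not part of the build */
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = {
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,          /* 0 == to end of file */
		.l_type = F_WRLCK,
	};
	int fd = open("/mnt/nfs/file", O_RDWR);

	if (fd < 0) {
		err(1, "open");
	}
	if (fcntl(fd, F_SETLKW, &fl) == -1) { /* blocks until granted */
		err(1, "F_SETLKW");
	}
	/* ... I/O under the lock ... */
	fl.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &fl) == -1) {
		err(1, "F_UNLCK");
	}
	close(fd);
	return 0;
}
#endif /* example only */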
4797
4798 /*
4799 * Check if an open owner holds any locks on a file.
4800 */
4801 int
4802 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4803 {
4804 struct nfs_lock_owner *nlop;
4805
4806 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4807 if (nlop->nlo_open_owner != noop) {
4808 continue;
4809 }
4810 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
4811 break;
4812 }
4813 }
4814 return nlop ? 1 : 0;
4815 }
4816
4817 /*
4818 * Reopen simple (no deny, no locks) open state that was lost.
4819 */
4820 int
4821 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4822 {
4823 struct nfs_open_owner *noop = nofp->nof_owner;
4824 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4825 nfsnode_t np = nofp->nof_np;
4826 vnode_t vp = NFSTOV(np);
4827 vnode_t dvp = NULL;
4828 struct componentname cn;
4829 const char *vname = NULL;
4830 const char *name = NULL;
4831 size_t namelen;
4832 char smallname[128];
4833 char *filename = NULL;
4834 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4835 struct timespec ts = { 1, 0 };
4836
4837 lck_mtx_lock(&nofp->nof_lock);
4838 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4839 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
4840 break;
4841 }
4842 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
4843 slpflag = 0;
4844 }
4845 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4846 lck_mtx_unlock(&nofp->nof_lock);
4847 return error;
4848 }
4849 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4850 lck_mtx_unlock(&nofp->nof_lock);
4851
4852 nfs_node_lock_force(np);
4853 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4854 /*
4855 * The node's been sillyrenamed, so we need to use
4856 * the sillyrename directory/name to do the open.
4857 */
4858 struct nfs_sillyrename *nsp = np->n_sillyrename;
4859 dvp = NFSTOV(nsp->nsr_dnp);
4860 if ((error = vnode_get(dvp))) {
4861 nfs_node_unlock(np);
4862 goto out;
4863 }
4864 name = nsp->nsr_name;
4865 } else {
4866 /*
4867 * [sigh] We can't trust VFS to get the parent right for named
4868 * attribute nodes. (It likes to reparent the nodes after we've
4869 * created them.) Luckily we can probably get the right parent
4870 * from the n_parent we have stashed away.
4871 */
4872 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4873 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
4874 dvp = NULL;
4875 }
4876 if (!dvp) {
4877 dvp = vnode_getparent(vp);
4878 }
4879 vname = vnode_getname(vp);
4880 if (!dvp || !vname) {
4881 if (!error) {
4882 error = EIO;
4883 }
4884 nfs_node_unlock(np);
4885 goto out;
4886 }
4887 name = vname;
4888 }
4889 filename = &smallname[0];
4890 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4891 if (namelen >= sizeof(smallname)) {
4892 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
4893 if (!filename) {
4894 error = ENOMEM;
4894 nfs_node_unlock(np); /* the other error paths above also drop the node lock */
4895 goto out;
4896 }
4897 snprintf(filename, namelen + 1, "%s", name);
4898 }
4899 nfs_node_unlock(np);
4900 bzero(&cn, sizeof(cn));
4901 cn.cn_nameptr = filename;
4902 cn.cn_namelen = namelen;
4903
4904 restart:
4905 done = 0;
4906 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
4907 goto out;
4908 }
4909
4910 if (nofp->nof_rw) {
4911 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4912 }
4913 if (!error && nofp->nof_w) {
4914 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4915 }
4916 if (!error && nofp->nof_r) {
4917 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4918 }
4919
4920 if (nfs_mount_state_in_use_end(nmp, error)) {
4921 if (error == NFSERR_GRACE) {
4922 goto restart;
4923 }
4924 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4925 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4926 error = 0;
4927 goto out;
4928 }
4929 done = 1;
4930 out:
4931 if (error && (error != EINTR) && (error != ERESTART)) {
4932 nfs_revoke_open_state_for_node(np);
4933 }
4934 lck_mtx_lock(&nofp->nof_lock);
4935 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4936 if (done) {
4937 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4938 } else if (error) {
4939 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4940 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4941 }
4942 lck_mtx_unlock(&nofp->nof_lock);
4943 if (filename && (filename != &smallname[0])) {
4944 FREE(filename, M_TEMP);
4945 }
4946 if (vname) {
4947 vnode_putname(vname);
4948 }
4949 if (dvp != NULLVP) {
4950 vnode_put(dvp);
4951 }
4952 return error;
4953 }
4954
4955 /*
4956 * Send a normal OPEN RPC to open/create a file.
4957 */
4958 int
4959 nfs4_open_rpc(
4960 struct nfs_open_file *nofp,
4961 vfs_context_t ctx,
4962 struct componentname *cnp,
4963 struct vnode_attr *vap,
4964 vnode_t dvp,
4965 vnode_t *vpp,
4966 int create,
4967 int share_access,
4968 int share_deny)
4969 {
4970 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
4971 cnp, vap, dvp, vpp, create, share_access, share_deny);
4972 }
4973
4974 /*
4975 * Send an OPEN RPC to reopen a file.
4976 */
4977 int
4978 nfs4_open_reopen_rpc(
4979 struct nfs_open_file *nofp,
4980 thread_t thd,
4981 kauth_cred_t cred,
4982 struct componentname *cnp,
4983 vnode_t dvp,
4984 vnode_t *vpp,
4985 int share_access,
4986 int share_deny)
4987 {
4988 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
4989 }
4990
4991 /*
4992 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
4993 */
4994 int
4995 nfs4_open_confirm_rpc(
4996 struct nfsmount *nmp,
4997 nfsnode_t dnp,
4998 u_char *fhp,
4999 int fhlen,
5000 struct nfs_open_owner *noop,
5001 nfs_stateid *sid,
5002 thread_t thd,
5003 kauth_cred_t cred,
5004 struct nfs_vattr *nvap,
5005 uint64_t *xidp)
5006 {
5007 struct nfsm_chain nmreq, nmrep;
5008 int error = 0, status, numops;
5009 struct nfsreq_secinfo_args si;
5010
5011 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
5012 nfsm_chain_null(&nmreq);
5013 nfsm_chain_null(&nmrep);
5014
5015 // PUTFH, OPEN_CONFIRM, GETATTR
5016 numops = 3;
5017 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5018 nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
5019 numops--;
5020 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5021 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
5022 numops--;
5023 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
5024 nfsm_chain_add_stateid(error, &nmreq, sid);
5025 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5026 numops--;
5027 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5028 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5029 nfsm_chain_build_done(error, &nmreq);
5030 nfsm_assert(error, (numops == 0), EPROTO);
5031 nfsmout_if(error);
5032 error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);
5033
5034 nfsm_chain_skip_tag(error, &nmrep);
5035 nfsm_chain_get_32(error, &nmrep, numops);
5036 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5037 nfsmout_if(error);
5038 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
5039 nfs_owner_seqid_increment(noop, NULL, error);
5040 nfsm_chain_get_stateid(error, &nmrep, sid);
5041 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5042 nfsmout_if(error);
5043 error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
5044 nfsmout:
5045 nfsm_chain_cleanup(&nmreq);
5046 nfsm_chain_cleanup(&nmrep);
5047 return error;
5048 }
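
/*
 * Note: this RPC is only issued when the server sets
 * NFS_OPEN_RESULT_CONFIRM in an OPEN reply (see the rflags check in
 * nfs4_open_rpc_internal below), i.e. when an NFSv4.0 server has no
 * state for this open owner yet and wants its seqid usage confirmed.
 */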
5049
5050 /*
5051 * common OPEN RPC code
5052 *
5053 * If create is set, ctx must be passed in.
5054 * Returns a node on success if no node passed in.
5055 */
5056 int
5057 nfs4_open_rpc_internal(
5058 struct nfs_open_file *nofp,
5059 vfs_context_t ctx,
5060 thread_t thd,
5061 kauth_cred_t cred,
5062 struct componentname *cnp,
5063 struct vnode_attr *vap,
5064 vnode_t dvp,
5065 vnode_t *vpp,
5066 int create,
5067 int share_access,
5068 int share_deny)
5069 {
5070 struct nfsmount *nmp;
5071 struct nfs_open_owner *noop = nofp->nof_owner;
5072 struct nfs_vattr nvattr;
5073 int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
5074 int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
5075 u_int64_t xid, savedxid = 0;
5076 nfsnode_t dnp = VTONFS(dvp);
5077 nfsnode_t np, newnp = NULL;
5078 vnode_t newvp = NULL;
5079 struct nfsm_chain nmreq, nmrep;
5080 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5081 uint32_t rflags, delegation, recall;
5082 struct nfs_stateid stateid, dstateid, *sid;
5083 fhandle_t fh;
5084 struct nfsreq rq, *req = &rq;
5085 struct nfs_dulookup dul;
5086 char sbuf[64], *s;
5087 uint32_t ace_type, ace_flags, ace_mask, len, slen;
5088 struct kauth_ace ace;
5089 struct nfsreq_secinfo_args si;
5090
5091 if (create && !ctx) {
5092 return EINVAL;
5093 }
5094
5095 nmp = VTONMP(dvp);
5096 if (nfs_mount_gone(nmp)) {
5097 return ENXIO;
5098 }
5099 nfsvers = nmp->nm_vers;
5100 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
5101 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
5102 return EINVAL;
5103 }
5104
5105 np = *vpp ? VTONFS(*vpp) : NULL;
5106 if (create && vap) {
5107 exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
5108 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
5109 gotuid = VATTR_IS_ACTIVE(vap, va_uid);
5110 gotgid = VATTR_IS_ACTIVE(vap, va_gid);
5111 if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
5112 vap->va_vaflags |= VA_UTIMES_NULL;
5113 }
5114 } else {
5115 exclusive = gotuid = gotgid = 0;
5116 }
5117 if (nofp) {
5118 sid = &nofp->nof_stateid;
5119 } else {
5120 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
5121 sid = &stateid;
5122 }
5123
5124 if ((error = nfs_open_owner_set_busy(noop, thd))) {
5125 return error;
5126 }
5127 again:
5128 rflags = delegation = recall = 0;
5129 ace.ace_flags = 0;
5130 s = sbuf;
5131 slen = sizeof(sbuf);
5132 NVATTR_INIT(&nvattr);
5133 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);
5134
5135 nfsm_chain_null(&nmreq);
5136 nfsm_chain_null(&nmrep);
5137
5138 // PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
5139 numops = 6;
5140 nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
5141 nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
5142 numops--;
5143 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5144 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
5145 numops--;
5146 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
5147 numops--;
5148 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5149 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5150 nfsm_chain_add_32(error, &nmreq, share_access);
5151 nfsm_chain_add_32(error, &nmreq, share_deny);
5152 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
5153 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5154 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
5155 nfsm_chain_add_32(error, &nmreq, create);
5156 if (create) {
5157 if (exclusive) {
5158 static uint32_t create_verf; // XXX need a better verifier
5159 create_verf++;
5160 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
5161 /* insert 64 bit verifier */
5162 nfsm_chain_add_32(error, &nmreq, create_verf);
5163 nfsm_chain_add_32(error, &nmreq, create_verf);
5164 } else {
5165 nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
5166 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
5167 }
5168 }
5169 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
5170 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
5171 numops--;
5172 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5173 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5174 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5175 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5176 numops--;
5177 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
5178 numops--;
5179 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5180 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
5181 nfsm_chain_build_done(error, &nmreq);
5182 nfsm_assert(error, (numops == 0), EPROTO);
5183 if (!error) {
5184 error = busyerror = nfs_node_set_busy(dnp, thd);
5185 }
5186 nfsmout_if(error);
5187
5188 if (create && !namedattrs) {
5189 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
5190 }
5191
5192 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
5193 if (!error) {
5194 if (create && !namedattrs) {
5195 nfs_dulookup_start(&dul, dnp, ctx);
5196 }
5197 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
5198 savedxid = xid;
5199 }
5200
5201 if (create && !namedattrs) {
5202 nfs_dulookup_finish(&dul, dnp, ctx);
5203 }
5204
5205 if ((lockerror = nfs_node_lock(dnp))) {
5206 error = lockerror;
5207 }
5208 nfsm_chain_skip_tag(error, &nmrep);
5209 nfsm_chain_get_32(error, &nmrep, numops);
5210 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5211 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
5212 nfsmout_if(error);
5213 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5214 nfs_owner_seqid_increment(noop, NULL, error);
5215 nfsm_chain_get_stateid(error, &nmrep, sid);
5216 nfsm_chain_check_change_info(error, &nmrep, dnp);
5217 nfsm_chain_get_32(error, &nmrep, rflags);
5218 bmlen = NFS_ATTR_BITMAP_LEN;
5219 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5220 nfsm_chain_get_32(error, &nmrep, delegation);
5221 if (!error) {
5222 switch (delegation) {
5223 case NFS_OPEN_DELEGATE_NONE:
5224 break;
5225 case NFS_OPEN_DELEGATE_READ:
5226 case NFS_OPEN_DELEGATE_WRITE:
5227 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5228 nfsm_chain_get_32(error, &nmrep, recall);
5229 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the delegation's space limit (3 words) XXX
5230 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5231 }
5232 /* if we have any trouble accepting the ACE, just invalidate it */
5233 ace_type = ace_flags = ace_mask = len = 0;
5234 nfsm_chain_get_32(error, &nmrep, ace_type);
5235 nfsm_chain_get_32(error, &nmrep, ace_flags);
5236 nfsm_chain_get_32(error, &nmrep, ace_mask);
5237 nfsm_chain_get_32(error, &nmrep, len);
5238 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5239 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5240 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5241 if (!error && (len >= slen)) {
5242 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5243 if (s) {
5244 slen = len + 1;
5245 } else {
5246 ace.ace_flags = 0;
5247 }
5248 }
5249 if (s) {
5250 nfsm_chain_get_opaque(error, &nmrep, len, s);
5251 } else {
5252 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5253 }
5254 if (!error && s) {
5255 s[len] = '\0';
5256 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5257 ace.ace_flags = 0;
5258 }
5259 }
5260 if (error || !s) {
5261 ace.ace_flags = 0;
5262 }
5263 if (s && (s != sbuf)) {
5264 FREE(s, M_TEMP);
5265 }
5266 break;
5267 default:
5268 error = EBADRPC;
5269 break;
5270 }
5271 }
5272 /* At this point if we have no error, the object was created/opened. */
5273 open_error = error;
5274 nfsmout_if(error);
5275 if (create && vap && !exclusive) {
5276 nfs_vattr_set_supported(bitmap, vap);
5277 }
5278 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5279 nfsmout_if(error);
5280 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5281 nfsmout_if(error);
5282 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5283 printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
5284 error = EBADRPC;
5285 goto nfsmout;
5286 }
5287 if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5288 // XXX for the open case, what if fh doesn't match the vnode we think we're opening?
5289 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5290 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5291 NP(np, "nfs4_open_rpc: warning: file handle mismatch");
5292 }
5293 }
5294 /* directory attributes: if we don't get them, make sure to invalidate */
5295 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
5296 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5297 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
5298 if (error) {
5299 NATTRINVALIDATE(dnp);
5300 }
5301 nfsmout_if(error);
5302
5303 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5304 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5305 }
5306
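/*
 * NFSv4.0 servers may set OPEN4_RESULT_CONFIRM in rflags, meaning the
 * new stateid can't be used until it has been confirmed with an
 * OPEN_CONFIRM round trip; that's what the block below does.
 */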
5307 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
5308 nfs_node_unlock(dnp);
5309 lockerror = ENOENT;
5310 NVATTR_CLEANUP(&nvattr);
5311 error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
5312 nfsmout_if(error);
5313 savedxid = xid;
5314 if ((lockerror = nfs_node_lock(dnp))) {
5315 error = lockerror;
5316 }
5317 }
5318
5319 nfsmout:
5320 nfsm_chain_cleanup(&nmreq);
5321 nfsm_chain_cleanup(&nmrep);
5322
5323 if (!lockerror && create) {
5324 if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
5325 dnp->n_flag &= ~NNEGNCENTRIES;
5326 cache_purge_negatives(dvp);
5327 }
5328 dnp->n_flag |= NMODIFIED;
5329 nfs_node_unlock(dnp);
5330 lockerror = ENOENT;
5331 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
5332 }
5333 if (!lockerror) {
5334 nfs_node_unlock(dnp);
5335 }
5336 if (!error && !np && fh.fh_len) {
5337 /* create the vnode with the filehandle and attributes */
5338 xid = savedxid;
5339 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
5340 if (!error) {
5341 newvp = NFSTOV(newnp);
5342 }
5343 }
5344 NVATTR_CLEANUP(&nvattr);
5345 if (!busyerror) {
5346 nfs_node_clear_busy(dnp);
5347 }
5348 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5349 if (!np) {
5350 np = newnp;
5351 }
5352 if (!error && np && !recall) {
5353 /* stuff the delegation state in the node */
5354 lck_mtx_lock(&np->n_openlock);
5355 np->n_openflags &= ~N_DELEG_MASK;
5356 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5357 np->n_dstateid = dstateid;
5358 np->n_dace = ace;
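/*
 * n_dlink.tqe_next == NFSNOLIST marks "not on any delegation list";
 * it's re-checked under nm_lock so two racers can't insert the node
 * onto nm_delegations twice.
 */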
5359 if (np->n_dlink.tqe_next == NFSNOLIST) {
5360 lck_mtx_lock(&nmp->nm_lock);
5361 if (np->n_dlink.tqe_next == NFSNOLIST) {
5362 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5363 }
5364 lck_mtx_unlock(&nmp->nm_lock);
5365 }
5366 lck_mtx_unlock(&np->n_openlock);
5367 } else {
5368 /* give the delegation back */
5369 if (np) {
5370 if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5371 /* update delegation state and return it */
5372 lck_mtx_lock(&np->n_openlock);
5373 np->n_openflags &= ~N_DELEG_MASK;
5374 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5375 np->n_dstateid = dstateid;
5376 np->n_dace = ace;
5377 if (np->n_dlink.tqe_next == NFSNOLIST) {
5378 lck_mtx_lock(&nmp->nm_lock);
5379 if (np->n_dlink.tqe_next == NFSNOLIST) {
5380 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5381 }
5382 lck_mtx_unlock(&nmp->nm_lock);
5383 }
5384 lck_mtx_unlock(&np->n_openlock);
5385 /* don't need to send a separate delegreturn for fh */
5386 fh.fh_len = 0;
5387 }
5388 /* return np's current delegation */
5389 nfs4_delegation_return(np, 0, thd, cred);
5390 }
5391 if (fh.fh_len) { /* return fh's delegation if it wasn't for np */
5392 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
5393 }
5394 }
5395 }
5396 if (error) {
5397 if (exclusive && (error == NFSERR_NOTSUPP)) {
5398 exclusive = 0;
5399 goto again;
5400 }
5401 if (newvp) {
5402 nfs_node_unlock(newnp);
5403 vnode_put(newvp);
5404 }
5405 } else if (create) {
5406 nfs_node_unlock(newnp);
5407 if (exclusive) {
5408 error = nfs4_setattr_rpc(newnp, vap, ctx);
5409 if (error && (gotuid || gotgid)) {
5410 /* it's possible the server didn't like our attempt to set IDs, */
5411 /* so let's try it again without them */
5412 VATTR_CLEAR_ACTIVE(vap, va_uid);
5413 VATTR_CLEAR_ACTIVE(vap, va_gid);
5414 error = nfs4_setattr_rpc(newnp, vap, ctx);
5415 }
5416 }
5417 if (error) {
5418 vnode_put(newvp);
5419 } else {
5420 *vpp = newvp;
5421 }
5422 }
5423 nfs_open_owner_clear_busy(noop);
5424 return error;
5425 }
5426
5427
5428 /*
5429 * Send an OPEN RPC to claim a delegated open for a file
5430 */
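/*
 * The compound is PUTFH(parent) + OPEN(CLAIM_DELEGATE_CUR) + GETATTR:
 * we present the delegation stateid along with the parent directory
 * filehandle and component name to convert our delegated open into a
 * regular one. As elsewhere in this file, the nfsm_chain_* macros
 * thread `error` through each step and become no-ops once it's set,
 * with nfsmout_if() bailing out to the nfsmout: label.
 */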
5431 int
5432 nfs4_claim_delegated_open_rpc(
5433 struct nfs_open_file *nofp,
5434 int share_access,
5435 int share_deny,
5436 int flags)
5437 {
5438 struct nfsmount *nmp;
5439 struct nfs_open_owner *noop = nofp->nof_owner;
5440 struct nfs_vattr nvattr;
5441 int error = 0, lockerror = ENOENT, status;
5442 int nfsvers, numops;
5443 u_int64_t xid;
5444 nfsnode_t np = nofp->nof_np;
5445 struct nfsm_chain nmreq, nmrep;
5446 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5447 uint32_t rflags = 0, delegation, recall = 0;
5448 fhandle_t fh;
5449 struct nfs_stateid dstateid;
5450 char sbuf[64], *s = sbuf;
5451 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5452 struct kauth_ace ace;
5453 vnode_t dvp = NULL;
5454 const char *vname = NULL;
5455 const char *name = NULL;
5456 size_t namelen;
5457 char smallname[128];
5458 char *filename = NULL;
5459 struct nfsreq_secinfo_args si;
5460
5461 nmp = NFSTONMP(np);
5462 if (nfs_mount_gone(nmp)) {
5463 return ENXIO;
5464 }
5465 nfsvers = nmp->nm_vers;
5466
5467 nfs_node_lock_force(np);
5468 if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
5469 /*
5470 * The node's been sillyrenamed, so we need to use
5471 * the sillyrename directory/name to do the open.
5472 */
5473 struct nfs_sillyrename *nsp = np->n_sillyrename;
5474 dvp = NFSTOV(nsp->nsr_dnp);
5475 if ((error = vnode_get(dvp))) {
5476 nfs_node_unlock(np);
5477 goto out;
5478 }
5479 name = nsp->nsr_name;
5480 } else {
5481 /*
5482 * [sigh] We can't trust VFS to get the parent right for named
5483 * attribute nodes. (It likes to reparent the nodes after we've
5484 * created them.) Luckily we can probably get the right parent
5485 * from the n_parent we have stashed away.
5486 */
5487 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
5488 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
5489 dvp = NULL;
5490 }
5491 if (!dvp) {
5492 dvp = vnode_getparent(NFSTOV(np));
5493 }
5494 vname = vnode_getname(NFSTOV(np));
5495 if (!dvp || !vname) {
5496 if (!error) {
5497 error = EIO;
5498 }
5499 nfs_node_unlock(np);
5500 goto out;
5501 }
5502 name = vname;
5503 }
5504 filename = &smallname[0];
5505 namelen = snprintf(filename, sizeof(smallname), "%s", name);
5506 if (namelen >= sizeof(smallname)) {
5507 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
5508 if (!filename) {
5509 error = ENOMEM;
5510 nfs_node_unlock(np);
5511 goto out;
5512 }
5513 snprintf(filename, namelen + 1, "%s", name);
5514 }
5515 nfs_node_unlock(np);
5516
5517 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5518 goto out;
5519 }
5520 NVATTR_INIT(&nvattr);
5521 delegation = NFS_OPEN_DELEGATE_NONE;
5522 dstateid = np->n_dstateid;
5523 NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);
5524
5525 nfsm_chain_null(&nmreq);
5526 nfsm_chain_null(&nmrep);
5527
5528 // PUTFH, OPEN, GETATTR(FH)
5529 numops = 3;
5530 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5531 nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
5532 numops--;
5533 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5534 nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
5535 numops--;
5536 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5537 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5538 nfsm_chain_add_32(error, &nmreq, share_access);
5539 nfsm_chain_add_32(error, &nmreq, share_deny);
5540 // open owner: clientid + uid
5541 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5542 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5543 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
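/*
 * Rough shape of the open_owner4 encoded above (RFC 7530):
 *     struct open_owner4 {
 *         clientid4 clientid;  // 64-bit id from SETCLIENTID
 *         opaque    owner<>;   // here, a 4-byte XDR length + the uid
 *     };
 * Using the credential's uid as the owner string gives one open owner
 * per (client, user) pair.
 */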
5544 // openflag4
5545 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5546 // open_claim4
5547 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
5548 nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
5549 nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
5550 numops--;
5551 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5552 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5553 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5554 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5555 nfsm_chain_build_done(error, &nmreq);
5556 nfsm_assert(error, (numops == 0), EPROTO);
5557 nfsmout_if(error);
5558
5559 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5560 noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
5561
5562 if ((lockerror = nfs_node_lock(np))) {
5563 error = lockerror;
5564 }
5565 nfsm_chain_skip_tag(error, &nmrep);
5566 nfsm_chain_get_32(error, &nmrep, numops);
5567 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5568 nfsmout_if(error);
5569 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5570 nfs_owner_seqid_increment(noop, NULL, error);
5571 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5572 nfsm_chain_check_change_info(error, &nmrep, np);
5573 nfsm_chain_get_32(error, &nmrep, rflags);
5574 bmlen = NFS_ATTR_BITMAP_LEN;
5575 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5576 nfsm_chain_get_32(error, &nmrep, delegation);
5577 if (!error) {
5578 switch (delegation) {
5579 case NFS_OPEN_DELEGATE_NONE:
5580 // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
5581 // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
5582 break;
5583 case NFS_OPEN_DELEGATE_READ:
5584 case NFS_OPEN_DELEGATE_WRITE:
5585 if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
5586 (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
5587 (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
5588 (delegation == NFS_OPEN_DELEGATE_READ))) {
5589 printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
5590 ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
5591 (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
5592 }
5593 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5594 nfsm_chain_get_32(error, &nmrep, recall);
5595 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the 3-word nfs_space_limit4 XXX
5596 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5597 }
5598 /* if we have any trouble accepting the ACE, just invalidate it */
5599 ace_type = ace_flags = ace_mask = len = 0;
5600 nfsm_chain_get_32(error, &nmrep, ace_type);
5601 nfsm_chain_get_32(error, &nmrep, ace_flags);
5602 nfsm_chain_get_32(error, &nmrep, ace_mask);
5603 nfsm_chain_get_32(error, &nmrep, len);
5604 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5605 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5606 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5607 if (!error && (len >= slen)) {
5608 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5609 if (s) {
5610 slen = len + 1;
5611 } else {
5612 ace.ace_flags = 0;
5613 }
5614 }
5615 if (s) {
5616 nfsm_chain_get_opaque(error, &nmrep, len, s);
5617 } else {
5618 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5619 }
5620 if (!error && s) {
5621 s[len] = '\0';
5622 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5623 ace.ace_flags = 0;
5624 }
5625 }
5626 if (error || !s) {
5627 ace.ace_flags = 0;
5628 }
5629 if (s && (s != sbuf)) {
5630 FREE(s, M_TEMP);
5631 }
5632 if (!error) {
5633 /* stuff the latest delegation state in the node */
5634 lck_mtx_lock(&np->n_openlock);
5635 np->n_openflags &= ~N_DELEG_MASK;
5636 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5637 np->n_dstateid = dstateid;
5638 np->n_dace = ace;
5639 if (np->n_dlink.tqe_next == NFSNOLIST) {
5640 lck_mtx_lock(&nmp->nm_lock);
5641 if (np->n_dlink.tqe_next == NFSNOLIST) {
5642 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5643 }
5644 lck_mtx_unlock(&nmp->nm_lock);
5645 }
5646 lck_mtx_unlock(&np->n_openlock);
5647 }
5648 break;
5649 default:
5650 error = EBADRPC;
5651 break;
5652 }
5653 }
5654 nfsmout_if(error);
5655 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5656 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5657 nfsmout_if(error);
5658 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5659 printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
5660 error = EBADRPC;
5661 goto nfsmout;
5662 }
5663 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5664 // XXX what if fh doesn't match the vnode we think we're re-opening?
5665 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5666 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5667 printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
5668 }
5669 }
5670 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5671 nfsmout_if(error);
5672 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5673 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5674 }
5675 nfsmout:
5676 NVATTR_CLEANUP(&nvattr);
5677 nfsm_chain_cleanup(&nmreq);
5678 nfsm_chain_cleanup(&nmrep);
5679 if (!lockerror) {
5680 nfs_node_unlock(np);
5681 }
5682 nfs_open_owner_clear_busy(noop);
5683 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5684 if (recall) {
5685 /*
5686 * We're making a delegated claim.
5687 * Don't return the delegation here in case we have more to claim.
5688 * Just make sure it's queued up to be returned.
5689 */
5690 nfs4_delegation_return_enqueue(np);
5691 }
5692 }
5693 out:
5694 // if (!error)
5695 // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
5696 if (filename && (filename != &smallname[0])) {
5697 FREE(filename, M_TEMP);
5698 }
5699 if (vname) {
5700 vnode_putname(vname);
5701 }
5702 if (dvp != NULLVP) {
5703 vnode_put(dvp);
5704 }
5705 return error;
5706 }
5707
5708 /*
5709 * Send an OPEN RPC to reclaim an open file.
5710 */
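/*
 * Reclaims run during state recovery (note the R_RECOVER | R_NOINTR
 * request flags below): after a server restart the client re-asserts
 * each open it held, using CLAIM_PREVIOUS against the node's own
 * filehandle rather than a directory/name pair.
 */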
5711 int
5712 nfs4_open_reclaim_rpc(
5713 struct nfs_open_file *nofp,
5714 int share_access,
5715 int share_deny)
5716 {
5717 struct nfsmount *nmp;
5718 struct nfs_open_owner *noop = nofp->nof_owner;
5719 struct nfs_vattr nvattr;
5720 int error = 0, lockerror = ENOENT, status;
5721 int nfsvers, numops;
5722 u_int64_t xid;
5723 nfsnode_t np = nofp->nof_np;
5724 struct nfsm_chain nmreq, nmrep;
5725 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5726 uint32_t rflags = 0, delegation, recall = 0;
5727 fhandle_t fh;
5728 struct nfs_stateid dstateid;
5729 char sbuf[64], *s = sbuf;
5730 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5731 struct kauth_ace ace;
5732 struct nfsreq_secinfo_args si;
5733
5734 nmp = NFSTONMP(np);
5735 if (nfs_mount_gone(nmp)) {
5736 return ENXIO;
5737 }
5738 nfsvers = nmp->nm_vers;
5739
5740 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5741 return error;
5742 }
5743
5744 NVATTR_INIT(&nvattr);
5745 delegation = NFS_OPEN_DELEGATE_NONE;
5746 dstateid = np->n_dstateid;
5747 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5748
5749 nfsm_chain_null(&nmreq);
5750 nfsm_chain_null(&nmrep);
5751
5752 // PUTFH, OPEN, GETATTR(FH)
5753 numops = 3;
5754 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5755 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5756 numops--;
5757 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5758 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5759 numops--;
5760 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5761 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5762 nfsm_chain_add_32(error, &nmreq, share_access);
5763 nfsm_chain_add_32(error, &nmreq, share_deny);
5764 // open owner: clientid + uid
5765 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5766 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5767 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5768 // openflag4
5769 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5770 // open_claim4
5771 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5772 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5773 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5774 NFS_OPEN_DELEGATE_NONE;
5775 nfsm_chain_add_32(error, &nmreq, delegation);
5776 delegation = NFS_OPEN_DELEGATE_NONE;
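/*
 * For CLAIM_PREVIOUS the request carries the delegation type we held
 * (computed from n_openflags just above) so the server can reinstate
 * it; `delegation` is then reset so the reply parsing below starts
 * from a clean slate.
 */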
5777 numops--;
5778 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5779 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5780 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5781 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5782 nfsm_chain_build_done(error, &nmreq);
5783 nfsm_assert(error, (numops == 0), EPROTO);
5784 nfsmout_if(error);
5785
5786 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5787 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5788
5789 if ((lockerror = nfs_node_lock(np))) {
5790 error = lockerror;
5791 }
5792 nfsm_chain_skip_tag(error, &nmrep);
5793 nfsm_chain_get_32(error, &nmrep, numops);
5794 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5795 nfsmout_if(error);
5796 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5797 nfs_owner_seqid_increment(noop, NULL, error);
5798 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5799 nfsm_chain_check_change_info(error, &nmrep, np);
5800 nfsm_chain_get_32(error, &nmrep, rflags);
5801 bmlen = NFS_ATTR_BITMAP_LEN;
5802 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5803 nfsm_chain_get_32(error, &nmrep, delegation);
5804 if (!error) {
5805 switch (delegation) {
5806 case NFS_OPEN_DELEGATE_NONE:
5807 if (np->n_openflags & N_DELEG_MASK) {
5808 /*
5809 * Hey! We were supposed to get our delegation back even
5810 * if it was getting immediately recalled. Bad server!
5811 *
5812 * Just try to return the existing delegation.
5813 */
5814 // NP(np, "nfs: open reclaim didn't return delegation?");
5815 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5816 recall = 1;
5817 }
5818 break;
5819 case NFS_OPEN_DELEGATE_READ:
5820 case NFS_OPEN_DELEGATE_WRITE:
5821 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5822 nfsm_chain_get_32(error, &nmrep, recall);
5823 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip the 3-word nfs_space_limit4 XXX
5824 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5825 }
5826 /* if we have any trouble accepting the ACE, just invalidate it */
5827 ace_type = ace_flags = ace_mask = len = 0;
5828 nfsm_chain_get_32(error, &nmrep, ace_type);
5829 nfsm_chain_get_32(error, &nmrep, ace_flags);
5830 nfsm_chain_get_32(error, &nmrep, ace_mask);
5831 nfsm_chain_get_32(error, &nmrep, len);
5832 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5833 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5834 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5835 if (!error && (len >= slen)) {
5836 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5837 if (s) {
5838 slen = len + 1;
5839 } else {
5840 ace.ace_flags = 0;
5841 }
5842 }
5843 if (s) {
5844 nfsm_chain_get_opaque(error, &nmrep, len, s);
5845 } else {
5846 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5847 }
5848 if (!error && s) {
5849 s[len] = '\0';
5850 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5851 ace.ace_flags = 0;
5852 }
5853 }
5854 if (error || !s) {
5855 ace.ace_flags = 0;
5856 }
5857 if (s && (s != sbuf)) {
5858 FREE(s, M_TEMP);
5859 }
5860 if (!error) {
5861 /* stuff the delegation state in the node */
5862 lck_mtx_lock(&np->n_openlock);
5863 np->n_openflags &= ~N_DELEG_MASK;
5864 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5865 np->n_dstateid = dstateid;
5866 np->n_dace = ace;
5867 if (np->n_dlink.tqe_next == NFSNOLIST) {
5868 lck_mtx_lock(&nmp->nm_lock);
5869 if (np->n_dlink.tqe_next == NFSNOLIST) {
5870 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5871 }
5872 lck_mtx_unlock(&nmp->nm_lock);
5873 }
5874 lck_mtx_unlock(&np->n_openlock);
5875 }
5876 break;
5877 default:
5878 error = EBADRPC;
5879 break;
5880 }
5881 }
5882 nfsmout_if(error);
5883 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5884 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5885 nfsmout_if(error);
5886 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5887 NP(np, "nfs: open reclaim didn't return filehandle?");
5888 error = EBADRPC;
5889 goto nfsmout;
5890 }
5891 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5892 // XXX what if fh doesn't match the vnode we think we're re-opening?
5893 // That should be pretty hard in this case, given that we are doing
5894 // the open reclaim using the file handle (and not a dir/name pair).
5895 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5896 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5897 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5898 }
5899 }
5900 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5901 nfsmout_if(error);
5902 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5903 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5904 }
5905 nfsmout:
5906 // if (!error)
5907 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5908 NVATTR_CLEANUP(&nvattr);
5909 nfsm_chain_cleanup(&nmreq);
5910 nfsm_chain_cleanup(&nmrep);
5911 if (!lockerror) {
5912 nfs_node_unlock(np);
5913 }
5914 nfs_open_owner_clear_busy(noop);
5915 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5916 if (recall) {
5917 nfs4_delegation_return_enqueue(np);
5918 }
5919 }
5920 return error;
5921 }
5922
5923 int
5924 nfs4_open_downgrade_rpc(
5925 nfsnode_t np,
5926 struct nfs_open_file *nofp,
5927 vfs_context_t ctx)
5928 {
5929 struct nfs_open_owner *noop = nofp->nof_owner;
5930 struct nfsmount *nmp;
5931 int error, lockerror = ENOENT, status, nfsvers, numops;
5932 struct nfsm_chain nmreq, nmrep;
5933 u_int64_t xid;
5934 struct nfsreq_secinfo_args si;
5935
5936 nmp = NFSTONMP(np);
5937 if (nfs_mount_gone(nmp)) {
5938 return ENXIO;
5939 }
5940 nfsvers = nmp->nm_vers;
5941
5942 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5943 return error;
5944 }
5945
5946 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5947 nfsm_chain_null(&nmreq);
5948 nfsm_chain_null(&nmrep);
5949
5950 // PUTFH, OPEN_DOWNGRADE, GETATTR
5951 numops = 3;
5952 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
5953 nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
5954 numops--;
5955 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5956 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5957 numops--;
5958 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
5959 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
5960 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5961 nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
5962 nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
5963 numops--;
5964 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5965 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
5966 nfsm_chain_build_done(error, &nmreq);
5967 nfsm_assert(error, (numops == 0), EPROTO);
5968 nfsmout_if(error);
5969 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
5970 vfs_context_thread(ctx), vfs_context_ucred(ctx),
5971 &si, R_NOINTR, &nmrep, &xid, &status);
5972
5973 if ((lockerror = nfs_node_lock(np))) {
5974 error = lockerror;
5975 }
5976 nfsm_chain_skip_tag(error, &nmrep);
5977 nfsm_chain_get_32(error, &nmrep, numops);
5978 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5979 nfsmout_if(error);
5980 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
5981 nfs_owner_seqid_increment(noop, NULL, error);
5982 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5983 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5984 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
5985 nfsmout:
5986 if (!lockerror) {
5987 nfs_node_unlock(np);
5988 }
5989 nfs_open_owner_clear_busy(noop);
5990 nfsm_chain_cleanup(&nmreq);
5991 nfsm_chain_cleanup(&nmrep);
5992 return error;
5993 }
5994
5995 int
5996 nfs4_close_rpc(
5997 nfsnode_t np,
5998 struct nfs_open_file *nofp,
5999 thread_t thd,
6000 kauth_cred_t cred,
6001 int flags)
6002 {
6003 struct nfs_open_owner *noop = nofp->nof_owner;
6004 struct nfsmount *nmp;
6005 int error, lockerror = ENOENT, status, nfsvers, numops;
6006 struct nfsm_chain nmreq, nmrep;
6007 u_int64_t xid;
6008 struct nfsreq_secinfo_args si;
6009
6010 nmp = NFSTONMP(np);
6011 if (nfs_mount_gone(nmp)) {
6012 return ENXIO;
6013 }
6014 nfsvers = nmp->nm_vers;
6015
6016 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
6017 return error;
6018 }
6019
6020 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
6021 nfsm_chain_null(&nmreq);
6022 nfsm_chain_null(&nmrep);
6023
6024 // PUTFH, CLOSE, GETATTR
6025 numops = 3;
6026 nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
6027 nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
6028 numops--;
6029 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6030 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
6031 numops--;
6032 nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
6033 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
6034 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
6035 numops--;
6036 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6037 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
6038 nfsm_chain_build_done(error, &nmreq);
6039 nfsm_assert(error, (numops == 0), EPROTO);
6040 nfsmout_if(error);
6041 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
6042
6043 if ((lockerror = nfs_node_lock(np))) {
6044 error = lockerror;
6045 }
6046 nfsm_chain_skip_tag(error, &nmrep);
6047 nfsm_chain_get_32(error, &nmrep, numops);
6048 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6049 nfsmout_if(error);
6050 nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
6051 nfs_owner_seqid_increment(noop, NULL, error);
6052 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
6053 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6054 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
6055 nfsmout:
6056 if (!lockerror) {
6057 nfs_node_unlock(np);
6058 }
6059 nfs_open_owner_clear_busy(noop);
6060 nfsm_chain_cleanup(&nmreq);
6061 nfsm_chain_cleanup(&nmrep);
6062 return error;
6063 }
6064
6065
6066 /*
6067 * Claim the delegated open combinations this open file holds.
6068 */
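/*
 * The nof_d_* fields count opens that were performed locally under a
 * delegation, one counter per (access, deny) combination. Each nonzero
 * combination is replayed to the server with CLAIM_DELEGATE_CUR and,
 * on success, folded into the corresponding non-delegated counter.
 */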
6069 int
6070 nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
6071 {
6072 struct nfs_open_owner *noop = nofp->nof_owner;
6073 struct nfs_lock_owner *nlop;
6074 struct nfs_file_lock *nflp, *nextnflp;
6075 struct nfsmount *nmp;
6076 int error = 0, reopen = 0;
6077
6078 if (nofp->nof_d_rw_drw) {
6079 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
6080 if (!error) {
6081 lck_mtx_lock(&nofp->nof_lock);
6082 nofp->nof_rw_drw += nofp->nof_d_rw_drw;
6083 nofp->nof_d_rw_drw = 0;
6084 lck_mtx_unlock(&nofp->nof_lock);
6085 }
6086 }
6087 if (!error && nofp->nof_d_w_drw) {
6088 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
6089 if (!error) {
6090 lck_mtx_lock(&nofp->nof_lock);
6091 nofp->nof_w_drw += nofp->nof_d_w_drw;
6092 nofp->nof_d_w_drw = 0;
6093 lck_mtx_unlock(&nofp->nof_lock);
6094 }
6095 }
6096 if (!error && nofp->nof_d_r_drw) {
6097 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
6098 if (!error) {
6099 lck_mtx_lock(&nofp->nof_lock);
6100 nofp->nof_r_drw += nofp->nof_d_r_drw;
6101 nofp->nof_d_r_drw = 0;
6102 lck_mtx_unlock(&nofp->nof_lock);
6103 }
6104 }
6105 if (!error && nofp->nof_d_rw_dw) {
6106 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
6107 if (!error) {
6108 lck_mtx_lock(&nofp->nof_lock);
6109 nofp->nof_rw_dw += nofp->nof_d_rw_dw;
6110 nofp->nof_d_rw_dw = 0;
6111 lck_mtx_unlock(&nofp->nof_lock);
6112 }
6113 }
6114 if (!error && nofp->nof_d_w_dw) {
6115 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
6116 if (!error) {
6117 lck_mtx_lock(&nofp->nof_lock);
6118 nofp->nof_w_dw += nofp->nof_d_w_dw;
6119 nofp->nof_d_w_dw = 0;
6120 lck_mtx_unlock(&nofp->nof_lock);
6121 }
6122 }
6123 if (!error && nofp->nof_d_r_dw) {
6124 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
6125 if (!error) {
6126 lck_mtx_lock(&nofp->nof_lock);
6127 nofp->nof_r_dw += nofp->nof_d_r_dw;
6128 nofp->nof_d_r_dw = 0;
6129 lck_mtx_unlock(&nofp->nof_lock);
6130 }
6131 }
6132 /* non-deny-mode opens may be reopened if no locks are held */
6133 if (!error && nofp->nof_d_rw) {
6134 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
6135 /* for some errors, we should just try reopening the file */
6136 if (nfs_mount_state_error_delegation_lost(error)) {
6137 reopen = error;
6138 }
6139 if (!error || reopen) {
6140 lck_mtx_lock(&nofp->nof_lock);
6141 nofp->nof_rw += nofp->nof_d_rw;
6142 nofp->nof_d_rw = 0;
6143 lck_mtx_unlock(&nofp->nof_lock);
6144 }
6145 }
6146 /* if reopen is already set, just move the remaining write/read opens from the delegated counts to the non-delegated counts */
6147 if ((!error || reopen) && nofp->nof_d_w) {
6148 if (!error) {
6149 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
6150 /* for some errors, we should just try reopening the file */
6151 if (nfs_mount_state_error_delegation_lost(error)) {
6152 reopen = error;
6153 }
6154 }
6155 if (!error || reopen) {
6156 lck_mtx_lock(&nofp->nof_lock);
6157 nofp->nof_w += nofp->nof_d_w;
6158 nofp->nof_d_w = 0;
6159 lck_mtx_unlock(&nofp->nof_lock);
6160 }
6161 }
6162 if ((!error || reopen) && nofp->nof_d_r) {
6163 if (!error) {
6164 error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
6165 /* for some errors, we should just try reopening the file */
6166 if (nfs_mount_state_error_delegation_lost(error)) {
6167 reopen = error;
6168 }
6169 }
6170 if (!error || reopen) {
6171 lck_mtx_lock(&nofp->nof_lock);
6172 nofp->nof_r += nofp->nof_d_r;
6173 nofp->nof_d_r = 0;
6174 lck_mtx_unlock(&nofp->nof_lock);
6175 }
6176 }
6177
6178 if (reopen) {
6179 /*
6180 * Any problems with the delegation probably indicate that we
6181 * should review/return all of our current delegation state.
6182 */
6183 if ((nmp = NFSTONMP(nofp->nof_np))) {
6184 nfs4_delegation_return_enqueue(nofp->nof_np);
6185 lck_mtx_lock(&nmp->nm_lock);
6186 nfs_need_recover(nmp, NFSERR_EXPIRED);
6187 lck_mtx_unlock(&nmp->nm_lock);
6188 }
6189 if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
6190 /* just reopen the file on next access */
6191 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
6192 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6193 lck_mtx_lock(&nofp->nof_lock);
6194 nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
6195 lck_mtx_unlock(&nofp->nof_lock);
6196 return 0;
6197 }
6198 if (reopen) {
6199 NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
6200 reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6201 }
6202 }
6203
6204 if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
6205 /* claim delegated locks */
6206 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
6207 if (nlop->nlo_open_owner != noop) {
6208 continue;
6209 }
6210 TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
6211 /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
6212 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6213 continue;
6214 }
6215 /* skip non-delegated locks */
6216 if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6217 continue;
6218 }
6219 error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
6220 if (error) {
6221 NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
6222 nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6223 break;
6224 }
6225 // else {
6226 // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
6227 // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6228 // }
6229 }
6230 if (error) {
6231 break;
6232 }
6233 }
6234 }
6235
6236 if (!error) { /* all state claimed successfully! */
6237 return 0;
6238 }
6239
6240 /* restart if it looks like a problem more than just losing the delegation */
6241 if (!nfs_mount_state_error_delegation_lost(error) &&
6242 ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
6243 NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6244 if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
6245 nfs_need_reconnect(nmp);
6246 }
6247 return error;
6248 }
6249
6250 /* delegated state lost (once held but now not claimable) */
6251 NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
6252
6253 /*
6254 * Any problems with the delegation probably indicate that we
6255 * should review/return all of our current delegation state.
6256 */
6257 if ((nmp = NFSTONMP(nofp->nof_np))) {
6258 nfs4_delegation_return_enqueue(nofp->nof_np);
6259 lck_mtx_lock(&nmp->nm_lock);
6260 nfs_need_recover(nmp, NFSERR_EXPIRED);
6261 lck_mtx_unlock(&nmp->nm_lock);
6262 }
6263
6264 /* revoke all open file state */
6265 nfs_revoke_open_state_for_node(nofp->nof_np);
6266
6267 return error;
6268 }
6269
6270 /*
6271 * Release all open state for the given node.
6272 */
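/*
 * With `force` set we only tear down local state; otherwise each held
 * (non-delegated) lock gets an unlock RPC and each open gets a CLOSE
 * (v4 only), all sent with R_RECOVER.
 */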
6273 void
6274 nfs_release_open_state_for_node(nfsnode_t np, int force)
6275 {
6276 struct nfsmount *nmp = NFSTONMP(np);
6277 struct nfs_open_file *nofp;
6278 struct nfs_file_lock *nflp, *nextnflp;
6279
6280 /* drop held locks */
6281 TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
6282 /* skip dead & blocked lock requests */
6283 if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
6284 continue;
6285 }
6286 /* send an unlock if not a delegated lock */
6287 if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
6288 nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
6289 NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
6290 }
6291 /* kill/remove the lock */
6292 lck_mtx_lock(&np->n_openlock);
6293 nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
6294 lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
6295 TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
6296 lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
6297 if (nflp->nfl_blockcnt) {
6298 /* wake up anyone blocked on this lock */
6299 wakeup(nflp);
6300 } else {
6301 /* remove nflp from lock list and destroy */
6302 TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
6303 nfs_file_lock_destroy(nflp);
6304 }
6305 lck_mtx_unlock(&np->n_openlock);
6306 }
6307
6308 lck_mtx_lock(&np->n_openlock);
6309
6310 /* drop all opens */
6311 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6312 if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
6313 continue;
6314 }
6315 /* mark open state as lost */
6316 lck_mtx_lock(&nofp->nof_lock);
6317 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
6318 nofp->nof_flags |= NFS_OPEN_FILE_LOST;
6319
6320 lck_mtx_unlock(&nofp->nof_lock);
6321 if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
6322 nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
6323 }
6324 }
6325
6326 lck_mtx_unlock(&np->n_openlock);
6327 }
6328
6329 /*
6330 * State for a node has been lost, drop it, and revoke the node.
6331 * Attempt to return any state if possible in case the server
6332 * might somehow think we hold it.
6333 */
6334 void
6335 nfs_revoke_open_state_for_node(nfsnode_t np)
6336 {
6337 struct nfsmount *nmp;
6338
6339 /* mark node as needing to be revoked */
6340 nfs_node_lock_force(np);
6341 if (np->n_flag & NREVOKE) { /* already revoked? */
6342 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6343 nfs_node_unlock(np);
6344 return;
6345 }
6346 np->n_flag |= NREVOKE;
6347 nfs_node_unlock(np);
6348
6349 nfs_release_open_state_for_node(np, 0);
6350 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6351
6352 /* mark mount as needing a revoke scan and have the socket thread do it. */
6353 if ((nmp = NFSTONMP(np))) {
6354 lck_mtx_lock(&nmp->nm_lock);
6355 nmp->nm_state |= NFSSTA_REVOKE;
6356 nfs_mount_sock_thread_wake(nmp);
6357 lck_mtx_unlock(&nmp->nm_lock);
6358 }
6359 }
6360
6361 /*
6362 * Claim the delegated open combinations that each of this node's open files hold.
6363 */
6364 int
6365 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6366 {
6367 struct nfs_open_file *nofp;
6368 int error = 0;
6369
6370 lck_mtx_lock(&np->n_openlock);
6371
6372 /* walk the open file list looking for opens with delegated state to claim */
6373 restart:
6374 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6375 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6376 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6377 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6378 continue;
6379 }
6380 lck_mtx_unlock(&np->n_openlock);
6381 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6382 lck_mtx_lock(&np->n_openlock);
6383 if (error) {
6384 break;
6385 }
6386 goto restart;
6387 }
6388
6389 lck_mtx_unlock(&np->n_openlock);
6390
6391 return error;
6392 }
6393
6394 /*
6395 * Mark a node as needing to have its delegation returned.
6396 * Queue it up on the delegation return queue.
6397 * Make sure the thread is running.
6398 */
6399 void
6400 nfs4_delegation_return_enqueue(nfsnode_t np)
6401 {
6402 struct nfsmount *nmp;
6403
6404 nmp = NFSTONMP(np);
6405 if (nfs_mount_gone(nmp)) {
6406 return;
6407 }
6408
6409 lck_mtx_lock(&np->n_openlock);
6410 np->n_openflags |= N_DELEG_RETURN;
6411 lck_mtx_unlock(&np->n_openlock);
6412
6413 lck_mtx_lock(&nmp->nm_lock);
6414 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6415 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
6416 }
6417 nfs_mount_sock_thread_wake(nmp);
6418 lck_mtx_unlock(&nmp->nm_lock);
6419 }
6420
6421 /*
6422 * return any delegation we may have for the given node
6423 */
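/*
 * The sequence is: mark the node as returning, busy its open state so
 * nobody else uses the delegation stateid, claim any opens/locks still
 * riding on the delegation, send DELEGRETURN, and finally pull the node
 * off the return queue and clear the return flags.
 */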
6424 int
6425 nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
6426 {
6427 struct nfsmount *nmp;
6428 fhandle_t fh;
6429 nfs_stateid dstateid;
6430 int error;
6431
6432 nmp = NFSTONMP(np);
6433 if (nfs_mount_gone(nmp)) {
6434 return ENXIO;
6435 }
6436
6437 /* first, make sure the node's marked for delegation return */
6438 lck_mtx_lock(&np->n_openlock);
6439 np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
6440 lck_mtx_unlock(&np->n_openlock);
6441
6442 /* make sure nobody else is using the delegation state */
6443 if ((error = nfs_open_state_set_busy(np, NULL))) {
6444 goto out;
6445 }
6446
6447 /* claim any delegated state */
6448 if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
6449 goto out;
6450 }
6451
6452 /* return the delegation */
6453 lck_mtx_lock(&np->n_openlock);
6454 dstateid = np->n_dstateid;
6455 fh.fh_len = np->n_fhsize;
6456 bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
6457 lck_mtx_unlock(&np->n_openlock);
6458 error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
6459 /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
6460 if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
6461 lck_mtx_lock(&np->n_openlock);
6462 np->n_openflags &= ~N_DELEG_MASK;
6463 lck_mtx_lock(&nmp->nm_lock);
6464 if (np->n_dlink.tqe_next != NFSNOLIST) {
6465 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
6466 np->n_dlink.tqe_next = NFSNOLIST;
6467 }
6468 lck_mtx_unlock(&nmp->nm_lock);
6469 lck_mtx_unlock(&np->n_openlock);
6470 }
6471
6472 out:
6473 /* make sure it's no longer on the return queue and clear the return flags */
6474 lck_mtx_lock(&nmp->nm_lock);
6475 if (np->n_dreturn.tqe_next != NFSNOLIST) {
6476 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
6477 np->n_dreturn.tqe_next = NFSNOLIST;
6478 }
6479 lck_mtx_unlock(&nmp->nm_lock);
6480 lck_mtx_lock(&np->n_openlock);
6481 np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
6482 lck_mtx_unlock(&np->n_openlock);
6483
6484 if (error) {
6485 NP(np, "nfs4_delegation_return, error %d", error);
6486 if (error == ETIMEDOUT) {
6487 nfs_need_reconnect(nmp);
6488 }
6489 if (nfs_mount_state_error_should_restart(error)) {
6490 /* make sure recovery happens */
6491 lck_mtx_lock(&nmp->nm_lock);
6492 nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
6493 lck_mtx_unlock(&nmp->nm_lock);
6494 }
6495 }
6496
6497 nfs_open_state_clear_busy(np);
6498
6499 return error;
6500 }
6501
6502 /*
6503 * RPC to return a delegation for a file handle
6504 */
6505 int
6506 nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
6507 {
6508 int error = 0, status, numops;
6509 uint64_t xid;
6510 struct nfsm_chain nmreq, nmrep;
6511 struct nfsreq_secinfo_args si;
6512
6513 NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
6514 nfsm_chain_null(&nmreq);
6515 nfsm_chain_null(&nmrep);
6516
6517 // PUTFH, DELEGRETURN
6518 numops = 2;
6519 nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
6520 nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
6521 numops--;
6522 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6523 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
6524 numops--;
6525 nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
6526 nfsm_chain_add_stateid(error, &nmreq, sid);
6527 nfsm_chain_build_done(error, &nmreq);
6528 nfsm_assert(error, (numops == 0), EPROTO);
6529 nfsmout_if(error);
6530 error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
6531 nfsm_chain_skip_tag(error, &nmrep);
6532 nfsm_chain_get_32(error, &nmrep, numops);
6533 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6534 nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
6535 nfsmout:
6536 nfsm_chain_cleanup(&nmreq);
6537 nfsm_chain_cleanup(&nmrep);
6538 return error;
6539 }
6540
6541
6542 /*
6543 * NFS read call.
6544 * Just call nfs_bioread() to do the work.
6545 *
6546 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
6547 * without first calling VNOP_OPEN, so we make sure the file is open here.
6548 */
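/*
 * The fast path: if this open owner already has read access we go
 * straight to nfs_bioread(). Otherwise we open the file for read
 * (implicitly for v2/v3, via an OPEN RPC for v4) and flag it
 * NFS_OPEN_FILE_NEEDCLOSE so the open gets torn down later.
 */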
6549 int
6550 nfs_vnop_read(
6551 struct vnop_read_args /* {
6552 * struct vnodeop_desc *a_desc;
6553 * vnode_t a_vp;
6554 * struct uio *a_uio;
6555 * int a_ioflag;
6556 * vfs_context_t a_context;
6557 * } */*ap)
6558 {
6559 vnode_t vp = ap->a_vp;
6560 vfs_context_t ctx = ap->a_context;
6561 nfsnode_t np;
6562 struct nfsmount *nmp;
6563 struct nfs_open_owner *noop;
6564 struct nfs_open_file *nofp;
6565 int error;
6566
6567 if (vnode_vtype(ap->a_vp) != VREG) {
6568 return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
6569 }
6570
6571 np = VTONFS(vp);
6572 nmp = NFSTONMP(np);
6573 if (nfs_mount_gone(nmp)) {
6574 return ENXIO;
6575 }
6576 if (np->n_flag & NREVOKE) {
6577 return EIO;
6578 }
6579
6580 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6581 if (!noop) {
6582 return ENOMEM;
6583 }
6584 restart:
6585 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
6586 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6587 NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
6588 error = EIO;
6589 }
6590 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6591 error = nfs4_reopen(nofp, vfs_context_thread(ctx));
6592 nofp = NULL;
6593 if (!error) {
6594 goto restart;
6595 }
6596 }
6597 if (error) {
6598 nfs_open_owner_rele(noop);
6599 return error;
6600 }
6601 /*
6602 * Since the read path is a hot path, if we already have
6603 * read access, let's go ahead and try the read without
6604 * busying the mount and open file node for this open owner.
6605 *
6606 * N.B. This is inherently racy w.r.t. an execve using
6607 * an already open file, in that the read at the end of
6608 * this routine will be racing with a potential close.
6609 * The code below ultimately has the same problem. In practice
6610 * this does not seem to be an issue.
6611 */
6612 if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
6613 nfs_open_owner_rele(noop);
6614 goto do_read;
6615 }
6616 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6617 if (error) {
6618 nfs_open_owner_rele(noop);
6619 return error;
6620 }
6621 /*
6622 * If we don't have a file already open with the access we need (read) then
6623 * we need to open one. Otherwise we just co-opt an open. We might not already
6624 * have access because we're trying to read the first page of the
6625 * file for execve.
6626 */
6627 error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
6628 if (error) {
6629 nfs_mount_state_in_use_end(nmp, 0);
6630 nfs_open_owner_rele(noop);
6631 return error;
6632 }
6633 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
6634 /* we don't have the file open, so open it for read access if we're not denied */
6635 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
6636 NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
6637 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
6638 }
6639 if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
6640 nfs_open_file_clear_busy(nofp);
6641 nfs_mount_state_in_use_end(nmp, 0);
6642 nfs_open_owner_rele(noop);
6643 return EPERM;
6644 }
6645 if (np->n_flag & NREVOKE) {
6646 error = EIO;
6647 nfs_open_file_clear_busy(nofp);
6648 nfs_mount_state_in_use_end(nmp, 0);
6649 nfs_open_owner_rele(noop);
6650 return error;
6651 }
6652 if (nmp->nm_vers < NFS_VER4) {
6653 /* NFS v2/v3 opens are always allowed - so just add it. */
6654 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
6655 } else {
6656 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
6657 }
6658 if (!error) {
6659 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
6660 }
6661 }
6662 if (nofp) {
6663 nfs_open_file_clear_busy(nofp);
6664 }
6665 if (nfs_mount_state_in_use_end(nmp, error)) {
6666 nofp = NULL;
6667 goto restart;
6668 }
6669 nfs_open_owner_rele(noop);
6670 if (error) {
6671 return error;
6672 }
6673 do_read:
6674 return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
6675 }
6676
6677 /*
6678 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6679 * Files are created using the NFSv4 OPEN RPC. So we must open the
6680 * file to create it and then close it.
6681 */
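/*
 * Outline: grab a provisional, nodeless open file; OPEN with
 * NFS_OPEN_CREATE for read/write access; once we have the new node,
 * attach (or merge) the open file to it and mark it
 * NFS_OPEN_FILE_CREATE so the VNOP_OPEN that typically follows can
 * adopt this open instead of sending another OPEN.
 */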
6682 int
6683 nfs4_vnop_create(
6684 struct vnop_create_args /* {
6685 * struct vnodeop_desc *a_desc;
6686 * vnode_t a_dvp;
6687 * vnode_t *a_vpp;
6688 * struct componentname *a_cnp;
6689 * struct vnode_attr *a_vap;
6690 * vfs_context_t a_context;
6691 * } */*ap)
6692 {
6693 vfs_context_t ctx = ap->a_context;
6694 struct componentname *cnp = ap->a_cnp;
6695 struct vnode_attr *vap = ap->a_vap;
6696 vnode_t dvp = ap->a_dvp;
6697 vnode_t *vpp = ap->a_vpp;
6698 struct nfsmount *nmp;
6699 nfsnode_t np;
6700 int error = 0, busyerror = 0, accessMode, denyMode;
6701 struct nfs_open_owner *noop = NULL;
6702 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
6703
6704 nmp = VTONMP(dvp);
6705 if (nfs_mount_gone(nmp)) {
6706 return ENXIO;
6707 }
6708
6709 if (vap) {
6710 nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
6711 }
6712
6713 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
6714 if (!noop) {
6715 return ENOMEM;
6716 }
6717
6718 restart:
6719 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
6720 if (error) {
6721 nfs_open_owner_rele(noop);
6722 return error;
6723 }
6724
6725 /* grab a provisional, nodeless open file */
6726 error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
6727 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
6728 printf("nfs_vnop_create: LOST\n");
6729 error = EIO;
6730 }
6731 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
6732 /* This shouldn't happen given that this is a new, nodeless nofp */
6733 nfs_mount_state_in_use_end(nmp, 0);
6734 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
6735 nfs_open_file_destroy(newnofp);
6736 newnofp = NULL;
6737 if (!error) {
6738 goto restart;
6739 }
6740 }
6741 if (!error) {
6742 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
6743 }
6744 if (error) {
6745 if (newnofp) {
6746 nfs_open_file_destroy(newnofp);
6747 }
6748 newnofp = NULL;
6749 goto out;
6750 }
6751
6752 /*
6753 * We're just trying to create the file.
6754 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
6755 */
6756 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
6757 denyMode = NFS_OPEN_SHARE_DENY_NONE;
6758
6759 /* Do the open/create */
6760 error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
6761 if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
6762 VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
6763 /*
6764 * Hmm... it looks like we may have a situation where the request was
6765 * retransmitted because we never saw the first response, which successfully
6766 * created/opened the file, and the second attempt was then denied because
6767 * the mode the file was created with doesn't allow write access.
6768 *
6769 * We'll try to work around this by temporarily updating the mode and
6770 * retrying the open.
6771 */
6772 struct vnode_attr vattr;
6773
6774 /* first make sure it's there */
6775 int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
6776 if (!error2 && np) {
6777 nfs_node_unlock(np);
6778 *vpp = NFSTOV(np);
6779 if (vnode_vtype(NFSTOV(np)) == VREG) {
6780 VATTR_INIT(&vattr);
6781 VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
6782 if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
6783 error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
6784 VATTR_INIT(&vattr);
6785 VATTR_SET(&vattr, va_mode, vap->va_mode);
6786 nfs4_setattr_rpc(np, &vattr, ctx);
6787 if (!error2) {
6788 error = 0;
6789 }
6790 }
6791 }
6792 if (error) {
6793 vnode_put(*vpp);
6794 *vpp = NULL;
6795 }
6796 }
6797 }
6798 if (!error && !*vpp) {
6799 printf("nfs4_open_rpc returned without a node?\n");
6800 /* Hmmm... with no node, we have no filehandle and can't close it */
6801 error = EIO;
6802 }
6803 if (error) {
6804 /* need to cleanup our temporary nofp */
6805 nfs_open_file_clear_busy(newnofp);
6806 nfs_open_file_destroy(newnofp);
6807 newnofp = NULL;
6808 goto out;
6809 }
6810 /* After we have a node, add our open file struct to the node */
6811 np = VTONFS(*vpp);
6812 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
6813 nofp = newnofp;
6814 error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
6815 if (error) {
6816 /* This shouldn't happen, because we passed in a new nofp to use. */
6817 printf("nfs_open_file_find_internal failed! %d\n", error);
6818 goto out;
6819 } else if (nofp != newnofp) {
6820 /*
6821 * Hmm... an open file struct already exists.
6822 * Mark the existing one busy and merge our open into it.
6823 * Then destroy the one we created.
6824 * Note: there's no chance of an open conflict because the
6825 * open has already been granted.
6826 */
6827 busyerror = nfs_open_file_set_busy(nofp, NULL);
6828 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
6829 nofp->nof_stateid = newnofp->nof_stateid;
6830 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
6831 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
6832 }
6833 nfs_open_file_clear_busy(newnofp);
6834 nfs_open_file_destroy(newnofp);
6835 }
6836 newnofp = NULL;
6837 /* mark the node as holding a create-initiated open */
6838 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
6839 nofp->nof_creator = current_thread();
6840 out:
6841 if (nofp && !busyerror) {
6842 nfs_open_file_clear_busy(nofp);
6843 }
6844 if (nfs_mount_state_in_use_end(nmp, error)) {
6845 nofp = newnofp = NULL;
6846 busyerror = 0;
6847 goto restart;
6848 }
6849 if (noop) {
6850 nfs_open_owner_rele(noop);
6851 }
6852 return error;
6853 }
6854
6855 /*
6856 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6857 */
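/*
 * The compound is PUTFH(dir), SAVEFH, CREATE, GETATTR (of the new
 * object, including its filehandle), RESTOREFH, GETATTR(dir). The
 * SAVEFH/RESTOREFH pair lets one round trip fetch both the new
 * object's attributes and fresh post-op directory attributes.
 */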
6858 int
6859 nfs4_create_rpc(
6860 vfs_context_t ctx,
6861 nfsnode_t dnp,
6862 struct componentname *cnp,
6863 struct vnode_attr *vap,
6864 int type,
6865 char *link,
6866 nfsnode_t *npp)
6867 {
6868 struct nfsmount *nmp;
6869 struct nfs_vattr nvattr;
6870 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6871 int nfsvers, namedattrs, numops;
6872 u_int64_t xid, savedxid = 0;
6873 nfsnode_t np = NULL;
6874 vnode_t newvp = NULL;
6875 struct nfsm_chain nmreq, nmrep;
6876 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6877 const char *tag;
6878 nfs_specdata sd;
6879 fhandle_t fh;
6880 struct nfsreq rq, *req = &rq;
6881 struct nfs_dulookup dul;
6882 struct nfsreq_secinfo_args si;
6883
6884 nmp = NFSTONMP(dnp);
6885 if (nfs_mount_gone(nmp)) {
6886 return ENXIO;
6887 }
6888 nfsvers = nmp->nm_vers;
6889 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6890 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
6891 return EINVAL;
6892 }
6893
6894 sd.specdata1 = sd.specdata2 = 0;
6895
6896 switch (type) {
6897 case NFLNK:
6898 tag = "symlink";
6899 break;
6900 case NFBLK:
6901 case NFCHR:
6902 tag = "mknod";
6903 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
6904 return EINVAL;
6905 }
6906 sd.specdata1 = major(vap->va_rdev);
6907 sd.specdata2 = minor(vap->va_rdev);
6908 break;
6909 case NFSOCK:
6910 case NFFIFO:
6911 tag = "mknod";
6912 break;
6913 case NFDIR:
6914 tag = "mkdir";
6915 break;
6916 default:
6917 return EINVAL;
6918 }
6919
6920 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6921
6922 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6923 if (!namedattrs) {
6924 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6925 }
6926
6927 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6928 NVATTR_INIT(&nvattr);
6929 nfsm_chain_null(&nmreq);
6930 nfsm_chain_null(&nmrep);
6931
6932 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6933 numops = 6;
6934 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6935 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6936 numops--;
6937 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6938 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6939 numops--;
6940 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6941 numops--;
6942 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6943 nfsm_chain_add_32(error, &nmreq, type);
6944 if (type == NFLNK) {
6945 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6946 } else if ((type == NFBLK) || (type == NFCHR)) {
6947 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6948 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6949 }
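/*
 * This follows createtype4 from RFC 7530: NF4LNK carries the link
 * text, NF4BLK/NF4CHR carry specdata4 {major, minor}, and the other
 * types (socket, fifo, directory) have no type-specific arguments.
 */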
6950 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
6951 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
6952 numops--;
6953 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6954 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
6955 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
6956 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
6957 numops--;
6958 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
6959 numops--;
6960 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
6961 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
6962 nfsm_chain_build_done(error, &nmreq);
6963 nfsm_assert(error, (numops == 0), EPROTO);
6964 nfsmout_if(error);
6965
6966 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
6967 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
6968 if (!error) {
6969 if (!namedattrs) {
6970 nfs_dulookup_start(&dul, dnp, ctx);
6971 }
6972 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
6973 }
6974
6975 if ((lockerror = nfs_node_lock(dnp))) {
6976 error = lockerror;
6977 }
6978 nfsm_chain_skip_tag(error, &nmrep);
6979 nfsm_chain_get_32(error, &nmrep, numops);
6980 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
6981 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
6982 nfsmout_if(error);
6983 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
6984 nfsm_chain_check_change_info(error, &nmrep, dnp);
6985 bmlen = NFS_ATTR_BITMAP_LEN;
6986 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
6987 /* At this point if we have no error, the object was created. */
6988 /* if we don't get attributes, then we should look it up via nfs_lookitup() below. */
6989 create_error = error;
6990 nfsmout_if(error);
6991 nfs_vattr_set_supported(bitmap, vap);
6992 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
6993 nfsmout_if(error);
6994 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
6995 nfsmout_if(error);
6996 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
6997 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
6998 error = EBADRPC;
6999 goto nfsmout;
7000 }
7001 /* directory attributes: if we don't get them, make sure to invalidate */
7002 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7003 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7004 savedxid = xid;
7005 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
7006 if (error) {
7007 NATTRINVALIDATE(dnp);
7008 }
7009
7010 nfsmout:
7011 nfsm_chain_cleanup(&nmreq);
7012 nfsm_chain_cleanup(&nmrep);
7013
7014 if (!lockerror) {
7015 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7016 dnp->n_flag &= ~NNEGNCENTRIES;
7017 cache_purge_negatives(NFSTOV(dnp));
7018 }
7019 dnp->n_flag |= NMODIFIED;
7020 nfs_node_unlock(dnp);
7021 /* nfs_getattr() will check changed and purge caches */
7022 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7023 }
7024
7025 if (!error && fh.fh_len) {
7026 /* create the vnode with the filehandle and attributes */
7027 xid = savedxid;
7028 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
7029 if (!error) {
7030 newvp = NFSTOV(np);
7031 }
7032 }
7033 NVATTR_CLEANUP(&nvattr);
7034
7035 if (!namedattrs) {
7036 nfs_dulookup_finish(&dul, dnp, ctx);
7037 }
7038
7039 /*
7040 * Kludge: Map EEXIST => 0, assuming it is the reply to a retried request,
7041 * provided we can successfully look up the object.
7042 */
7043 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7044 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7045 if (!error) {
7046 newvp = NFSTOV(np);
7047 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
7048 error = EEXIST;
7049 }
7050 }
7051 }
7052 if (!busyerror) {
7053 nfs_node_clear_busy(dnp);
7054 }
7055 if (error) {
7056 if (newvp) {
7057 nfs_node_unlock(np);
7058 vnode_put(newvp);
7059 }
7060 } else {
7061 nfs_node_unlock(np);
7062 *npp = np;
7063 }
7064 return error;
7065 }
7066
7067 int
7068 nfs4_vnop_mknod(
7069 struct vnop_mknod_args /* {
7070 * struct vnodeop_desc *a_desc;
7071 * vnode_t a_dvp;
7072 * vnode_t *a_vpp;
7073 * struct componentname *a_cnp;
7074 * struct vnode_attr *a_vap;
7075 * vfs_context_t a_context;
7076 * } */*ap)
7077 {
7078 nfsnode_t np = NULL;
7079 struct nfsmount *nmp;
7080 int error;
7081
7082 nmp = VTONMP(ap->a_dvp);
7083 if (nfs_mount_gone(nmp)) {
7084 return ENXIO;
7085 }
7086
7087 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7088 return EINVAL;
7089 }
7090 switch (ap->a_vap->va_type) {
7091 case VBLK:
7092 case VCHR:
7093 case VFIFO:
7094 case VSOCK:
7095 break;
7096 default:
7097 return ENOTSUP;
7098 }
7099
7100 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7101 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7102 if (!error) {
7103 *ap->a_vpp = NFSTOV(np);
7104 }
7105 return error;
7106 }
7107
7108 int
7109 nfs4_vnop_mkdir(
7110 struct vnop_mkdir_args /* {
7111 * struct vnodeop_desc *a_desc;
7112 * vnode_t a_dvp;
7113 * vnode_t *a_vpp;
7114 * struct componentname *a_cnp;
7115 * struct vnode_attr *a_vap;
7116 * vfs_context_t a_context;
7117 * } */*ap)
7118 {
7119 nfsnode_t np = NULL;
7120 int error;
7121
7122 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7123 NFDIR, NULL, &np);
7124 if (!error) {
7125 *ap->a_vpp = NFSTOV(np);
7126 }
7127 return error;
7128 }
7129
7130 int
7131 nfs4_vnop_symlink(
7132 struct vnop_symlink_args /* {
7133 * struct vnodeop_desc *a_desc;
7134 * vnode_t a_dvp;
7135 * vnode_t *a_vpp;
7136 * struct componentname *a_cnp;
7137 * struct vnode_attr *a_vap;
7138 * char *a_target;
7139 * vfs_context_t a_context;
7140 * } */*ap)
7141 {
7142 nfsnode_t np = NULL;
7143 int error;
7144
7145 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7146 NFLNK, ap->a_target, &np);
7147 if (!error) {
7148 *ap->a_vpp = NFSTOV(np);
7149 }
7150 return error;
7151 }
7152
7153 int
7154 nfs4_vnop_link(
7155 struct vnop_link_args /* {
7156 * struct vnodeop_desc *a_desc;
7157 * vnode_t a_vp;
7158 * vnode_t a_tdvp;
7159 * struct componentname *a_cnp;
7160 * vfs_context_t a_context;
7161 * } */*ap)
7162 {
7163 vfs_context_t ctx = ap->a_context;
7164 vnode_t vp = ap->a_vp;
7165 vnode_t tdvp = ap->a_tdvp;
7166 struct componentname *cnp = ap->a_cnp;
7167 int error = 0, lockerror = ENOENT, status;
7168 struct nfsmount *nmp;
7169 nfsnode_t np = VTONFS(vp);
7170 nfsnode_t tdnp = VTONFS(tdvp);
7171 int nfsvers, numops;
7172 u_int64_t xid, savedxid;
7173 struct nfsm_chain nmreq, nmrep;
7174 struct nfsreq_secinfo_args si;
7175
7176 if (vnode_mount(vp) != vnode_mount(tdvp)) {
7177 return EXDEV;
7178 }
7179
7180 nmp = VTONMP(vp);
7181 if (nfs_mount_gone(nmp)) {
7182 return ENXIO;
7183 }
7184 nfsvers = nmp->nm_vers;
7185 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7186 return EINVAL;
7187 }
7188 if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7189 return EINVAL;
7190 }
7191
7192 /*
7193 * Push all writes to the server, so that the attribute cache
7194 * doesn't get "out of sync" with the server.
7195 * XXX There should be a better way!
7196 */
7197 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
7198
7199 if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
7200 return error;
7201 }
7202
7203 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7204 nfsm_chain_null(&nmreq);
7205 nfsm_chain_null(&nmrep);
7206
7207 // PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
7208 numops = 7;
7209 nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
7210 nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
7211 numops--;
7212 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7213 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
7214 numops--;
7215 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7216 numops--;
7217 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7218 nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
7219 numops--;
7220 nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
7221 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7222 numops--;
7223 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7224 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
7225 numops--;
7226 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7227 numops--;
7228 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7229 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
7230 nfsm_chain_build_done(error, &nmreq);
7231 nfsm_assert(error, (numops == 0), EPROTO);
7232 nfsmout_if(error);
7233 error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
7234
7235 if ((lockerror = nfs_node_lock2(tdnp, np))) {
7236 error = lockerror;
7237 goto nfsmout;
7238 }
7239 nfsm_chain_skip_tag(error, &nmrep);
7240 nfsm_chain_get_32(error, &nmrep, numops);
7241 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7242 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7243 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7244 nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
7245 nfsm_chain_check_change_info(error, &nmrep, tdnp);
7246 /* directory attributes: if we don't get them, make sure to invalidate */
7247 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7248 savedxid = xid;
7249 nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
7250 if (error) {
7251 NATTRINVALIDATE(tdnp);
7252 }
7253 /* link attributes: if we don't get them, make sure to invalidate */
7254 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7255 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7256 xid = savedxid;
7257 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
7258 if (error) {
7259 NATTRINVALIDATE(np);
7260 }
7261 nfsmout:
7262 nfsm_chain_cleanup(&nmreq);
7263 nfsm_chain_cleanup(&nmrep);
7264 if (!lockerror) {
7265 tdnp->n_flag |= NMODIFIED;
7266 }
7267 /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
7268 if (error == EEXIST) {
7269 error = 0;
7270 }
7271 if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
7272 tdnp->n_flag &= ~NNEGNCENTRIES;
7273 cache_purge_negatives(tdvp);
7274 }
7275 if (!lockerror) {
7276 nfs_node_unlock2(tdnp, np);
7277 }
7278 nfs_node_clear_busy2(tdnp, np);
7279 return error;
7280 }
7281
7282 int
7283 nfs4_vnop_rmdir(
7284 struct vnop_rmdir_args /* {
7285 * struct vnodeop_desc *a_desc;
7286 * vnode_t a_dvp;
7287 * vnode_t a_vp;
7288 * struct componentname *a_cnp;
7289 * vfs_context_t a_context;
7290 * } */*ap)
7291 {
7292 vfs_context_t ctx = ap->a_context;
7293 vnode_t vp = ap->a_vp;
7294 vnode_t dvp = ap->a_dvp;
7295 struct componentname *cnp = ap->a_cnp;
7296 struct nfsmount *nmp;
7297 int error = 0, namedattrs;
7298 nfsnode_t np = VTONFS(vp);
7299 nfsnode_t dnp = VTONFS(dvp);
7300 struct nfs_dulookup dul;
7301
7302 if (vnode_vtype(vp) != VDIR) {
7303 return EINVAL;
7304 }
7305
7306 nmp = NFSTONMP(dnp);
7307 if (nfs_mount_gone(nmp)) {
7308 return ENXIO;
7309 }
7310 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
7311
7312 if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
7313 return error;
7314 }
7315
7316 if (!namedattrs) {
7317 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
7318 nfs_dulookup_start(&dul, dnp, ctx);
7319 }
7320
7321 error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
7322 vfs_context_thread(ctx), vfs_context_ucred(ctx));
7323
7324 nfs_name_cache_purge(dnp, np, cnp, ctx);
7325 /* nfs_getattr() will check changed and purge caches */
7326 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7327 if (!namedattrs) {
7328 nfs_dulookup_finish(&dul, dnp, ctx);
7329 }
7330 nfs_node_clear_busy2(dnp, np);
7331
7332 /*
7333 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
7334 */
7335 if (error == ENOENT) {
7336 error = 0;
7337 }
7338 if (!error) {
7339 /*
7340 * remove nfsnode from hash now so we can't accidentally find it
7341 * again if another object gets created with the same filehandle
7342 * before this vnode gets reclaimed
7343 */
7344 lck_mtx_lock(nfs_node_hash_mutex);
7345 if (np->n_hflag & NHHASHED) {
7346 LIST_REMOVE(np, n_hash);
7347 np->n_hflag &= ~NHHASHED;
7348 FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
7349 }
7350 lck_mtx_unlock(nfs_node_hash_mutex);
7351 }
7352 return error;
7353 }
7354
7355 /*
7356 * NFSv4 Named Attributes
7357 *
7358 * Both the extended attributes interface and the named streams interface
7359 * are backed by NFSv4 named attributes. The implementations for both use
7360 * a common set of routines in an attempt to reduce code duplication, to
7361 * increase efficiency, to increase caching of both names and data, and to
7362 * confine the complexity.
7363 *
7364 * Each NFS node caches its named attribute directory's file handle.
7365 * The directory nodes for the named attribute directories are handled
7366 * exactly like regular directories (with a couple of minor exceptions).
7367 * Named attribute nodes are also treated as much like regular files as
7368 * possible.
7369 *
7370 * Most of the heavy lifting is done by nfs4_named_attr_get().
7371 */
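/*
 * For orientation, a rough (non-authoritative) map of how the VNOPs below
 * funnel into the common routines:
 *
 *   nfs4_vnop_getxattr / nfs4_vnop_getnamedstream
 *       -> nfs4_named_attr_get()       lookup/open, optional data prefetch
 *   nfs4_vnop_setxattr / nfs4_vnop_makenamedstream
 *       -> nfs4_named_attr_get()       create/open, optional truncate
 *   nfs4_vnop_removexattr / nfs4_vnop_removenamedstream
 *       -> nfs4_named_attr_remove()    -> nfs_vnop_remove()
 *   nfs4_vnop_listxattr
 *       -> nfs4_named_attr_dir_get()   then readdir of the attr directory
 */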
7372
7373 /*
7374 * Get the given node's attribute directory node.
7375 * If !fetch, then only return a cached node.
7376 * Otherwise, we will attempt to fetch the node from the server.
7377 * (Note: the node should be marked busy.)
7378 */
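/*
 * Callers use the fetch argument to choose between a purely cached lookup
 * (fetch == 0, as in nfs4_named_attr_get() below) and forcing the
 * PUTFH/OPENATTR/GETATTR compound when nothing is cached (fetch == 1, as in
 * the remove and listxattr paths).
 */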
7379 nfsnode_t
7380 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
7381 {
7382 nfsnode_t adnp = NULL;
7383 struct nfsmount *nmp;
7384 int error = 0, status, numops;
7385 struct nfsm_chain nmreq, nmrep;
7386 u_int64_t xid;
7387 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
7388 fhandle_t fh;
7389 struct nfs_vattr nvattr;
7390 struct componentname cn;
7391 struct nfsreq rq, *req = &rq;
7392 struct nfsreq_secinfo_args si;
7393
7394 nmp = NFSTONMP(np);
7395 if (nfs_mount_gone(nmp)) {
7396 return NULL;
7397 }
7398 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7399 return NULL;
7400 }
7401
7402 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7403 NVATTR_INIT(&nvattr);
7404 nfsm_chain_null(&nmreq);
7405 nfsm_chain_null(&nmrep);
7406
7407 bzero(&cn, sizeof(cn));
7408 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7409 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7410 cn.cn_nameiop = LOOKUP;
7411
7412 if (np->n_attrdirfh) {
7413 // XXX can't set parent correctly (to np) yet
7414 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7415 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7416 if (adnp) {
7417 goto nfsmout;
7418 }
7419 }
7420 if (!fetch) {
7421 error = ENOENT;
7422 goto nfsmout;
7423 }
7424
7425 // PUTFH, OPENATTR, GETATTR
7426 numops = 3;
7427 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
7428 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
7429 numops--;
7430 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7431 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7432 numops--;
7433 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7434 nfsm_chain_add_32(error, &nmreq, 0);
7435 numops--;
7436 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7437 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7438 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7439 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7440 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7441 nfsm_chain_build_done(error, &nmreq);
7442 nfsm_assert(error, (numops == 0), EPROTO);
7443 nfsmout_if(error);
7444 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
7445 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7446 if (!error) {
7447 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7448 }
7449
7450 nfsm_chain_skip_tag(error, &nmrep);
7451 nfsm_chain_get_32(error, &nmrep, numops);
7452 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7453 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7454 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7455 nfsmout_if(error);
7456 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7457 nfsmout_if(error);
7458 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7459 error = ENOENT;
7460 goto nfsmout;
7461 }
7462 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7463 /* (re)allocate attrdir fh buffer */
7464 if (np->n_attrdirfh) {
7465 FREE(np->n_attrdirfh, M_TEMP);
7466 }
7467 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7468 }
7469 if (!np->n_attrdirfh) {
7470 error = ENOMEM;
7471 goto nfsmout;
7472 }
7473 /* cache the attrdir fh in the node */
7474 *np->n_attrdirfh = fh.fh_len;
7475 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7476 /* create node for attrdir */
7477 // XXX can't set parent correctly (to np) yet
7478 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7479 nfsmout:
7480 NVATTR_CLEANUP(&nvattr);
7481 nfsm_chain_cleanup(&nmreq);
7482 nfsm_chain_cleanup(&nmrep);
7483
7484 if (adnp) {
7485 /* sanity check that this node is an attribute directory */
7486 if (adnp->n_vattr.nva_type != VDIR) {
7487 error = EINVAL;
7488 }
7489 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
7490 error = EINVAL;
7491 }
7492 nfs_node_unlock(adnp);
7493 if (error) {
7494 vnode_put(NFSTOV(adnp));
7495 }
7496 }
7497 return error ? NULL : adnp;
7498 }
7499
7500 /*
7501 * Get the given node's named attribute node for the name given.
7502 *
7503 * In an effort to increase the performance of named attribute access, we try
7504 * to reduce server requests by doing the following:
7505 *
7506 * - cache the node's named attribute directory file handle in the node
7507 * - maintain a directory vnode for the attribute directory
7508 * - use name cache entries (positive and negative) to speed up lookups
7509 * - optionally open the named attribute (with the given accessMode) in the same RPC
7510 * - combine attribute directory retrieval with the lookup/open RPC
7511 * - optionally prefetch the named attribute's first block of data in the same RPC
7512 *
7513 * Also, in an attempt to reduce the number of copies/variations of this code,
7514 * parts of the RPC building/processing code are conditionalized on what is
7515 * needed for any particular request (openattr, lookup vs. open, read).
7516 *
7517 * Note that because we may not have the attribute directory node when we start
7518 * the lookup/open, we lock both the node and the attribute directory node.
7519 */
7520
7521 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7522 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7523 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7524 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
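
/*
 * How the callers below combine these flags:
 *
 *   getxattr:  PREFETCH (except for the resource fork)
 *   setxattr:  CREATE (omitted for XATTR_REPLACE),
 *              CREATE_GUARDED for XATTR_CREATE,
 *              TRUNCATE (except for the resource fork)
 */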
7525
7526 int
7527 nfs4_named_attr_get(
7528 nfsnode_t np,
7529 struct componentname *cnp,
7530 uint32_t accessMode,
7531 int flags,
7532 vfs_context_t ctx,
7533 nfsnode_t *anpp,
7534 struct nfs_open_file **nofpp)
7535 {
7536 struct nfsmount *nmp;
7537 int error = 0, open_error = EIO;
7538 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7539 int create, guarded, prefetch, truncate, noopbusy = 0;
7540 int open, status, numops, hadattrdir, negnamecache;
7541 struct nfs_vattr nvattr;
7542 struct vnode_attr vattr;
7543 nfsnode_t adnp = NULL, anp = NULL;
7544 vnode_t avp = NULL;
7545 u_int64_t xid, savedxid = 0;
7546 struct nfsm_chain nmreq, nmrep;
7547 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7548 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7549 nfs_stateid stateid, dstateid;
7550 fhandle_t fh;
7551 struct nfs_open_owner *noop = NULL;
7552 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7553 struct vnop_access_args naa;
7554 thread_t thd;
7555 kauth_cred_t cred;
7556 struct timeval now;
7557 char sbuf[64], *s;
7558 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7559 struct kauth_ace ace;
7560 struct nfsreq rq, *req = &rq;
7561 struct nfsreq_secinfo_args si;
7562
7563 *anpp = NULL;
7564 fh.fh_len = 0;
7565 rflags = delegation = recall = eof = rlen = retlen = 0;
7566 ace.ace_flags = 0;
7567 s = sbuf;
7568 slen = sizeof(sbuf);
7569
7570 nmp = NFSTONMP(np);
7571 if (nfs_mount_gone(nmp)) {
7572 return ENXIO;
7573 }
7574 NVATTR_INIT(&nvattr);
7575 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7576 thd = vfs_context_thread(ctx);
7577 cred = vfs_context_ucred(ctx);
7578 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7579 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7580 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7581 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7582
7583 if (!create) {
7584 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7585 if (error) {
7586 return error;
7587 }
7588 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7589 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7590 return ENOATTR;
7591 }
7592 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7593 /* shouldn't happen... but just be safe */
7594 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7595 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7596 }
7597 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7598 if (open) {
7599 /*
7600 * We're trying to open the file.
7601 * We'll create/open it with the given access mode,
7602 * and set NFS_OPEN_FILE_CREATE.
7603 */
7604 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7605 if (prefetch && guarded) {
7606 prefetch = 0; /* no sense prefetching data that can't be there */
7607 }
7608 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7609 if (!noop) {
7610 return ENOMEM;
7611 }
7612 }
7613
7614 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7615 return error;
7616 }
7617
7618 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7619 hadattrdir = (adnp != NULL);
7620 if (prefetch) {
7621 microuptime(&now);
7622 /* use the special state ID because we don't have a real one to send */
7623 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7624 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7625 }
7626 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7627 nfsm_chain_null(&nmreq);
7628 nfsm_chain_null(&nmrep);
7629
7630 if (hadattrdir) {
7631 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7632 goto nfsmout;
7633 }
7634 /* nfs_getattr() will check changed and purge caches */
7635 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7636 nfsmout_if(error);
7637 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7638 switch (error) {
7639 case ENOENT:
7640 /* negative cache entry */
7641 goto nfsmout;
7642 case 0:
7643 /* cache miss */
7644 /* try dir buf cache lookup */
7645 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7646 if (!error && anp) {
7647 /* dir buf cache hit */
7648 *anpp = anp;
7649 error = -1;
7650 }
7651 if (error != -1) { /* cache miss */
7652 break;
7653 }
7654 /* FALLTHROUGH */
7655 case -1:
7656 /* cache hit, not really an error */
7657 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7658 if (!anp && avp) {
7659 *anpp = anp = VTONFS(avp);
7660 }
7661
7662 nfs_node_clear_busy(adnp);
7663 adbusyerror = ENOENT;
7664
7665 /* check for directory access */
7666 naa.a_desc = &vnop_access_desc;
7667 naa.a_vp = NFSTOV(adnp);
7668 naa.a_action = KAUTH_VNODE_SEARCH;
7669 naa.a_context = ctx;
7670
7671 /* compute actual success/failure based on accessibility */
7672 error = nfs_vnop_access(&naa);
7673 /* FALLTHROUGH */
7674 default:
7675 /* we either found it, or hit an error */
7676 if (!error && guarded) {
7677 /* found cached entry but told not to use it */
7678 error = EEXIST;
7679 vnode_put(NFSTOV(anp));
7680 *anpp = anp = NULL;
7681 }
7682 /* we're done if error or we don't need to open */
7683 if (error || !open) {
7684 goto nfsmout;
7685 }
7686 /* no error and we need to open... */
7687 }
7688 }
7689
7690 if (open) {
7691 restart:
7692 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7693 if (error) {
7694 nfs_open_owner_rele(noop);
7695 noop = NULL;
7696 goto nfsmout;
7697 }
7698 inuse = 1;
7699
7700 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7701 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7702 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7703 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7704 error = EIO;
7705 }
7706 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7707 nfs_mount_state_in_use_end(nmp, 0);
7708 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7709 nfs_open_file_destroy(newnofp);
7710 newnofp = NULL;
7711 if (!error) {
7712 goto restart;
7713 }
7714 }
7715 if (!error) {
7716 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7717 }
7718 if (error) {
7719 if (newnofp) {
7720 nfs_open_file_destroy(newnofp);
7721 }
7722 newnofp = NULL;
7723 goto nfsmout;
7724 }
7725 if (anp) {
7726 /*
7727 * We already have the node. So we just need to open
7728 * it - which we may be able to do with a delegation.
7729 */
7730 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7731 if (!error) {
7732 /* open succeeded, so our open file is no longer temporary */
7733 nofp = newnofp;
7734 nofpbusyerror = 0;
7735 newnofp = NULL;
7736 if (nofpp) {
7737 *nofpp = nofp;
7738 }
7739 }
7740 goto nfsmout;
7741 }
7742 }
7743
7744 /*
7745 * We either don't have the attrdir or we didn't find the attribute
7746 * in the name cache, so we need to talk to the server.
7747 *
7748 * If we don't have the attrdir, we'll need to ask the server for that too.
7749 * If the caller is requesting that the attribute be created, we need to
7750 * make sure the attrdir is created.
7751 * The caller may also request that the first block of an existing attribute
7752 * be retrieved at the same time.
7753 */
7754
7755 if (open) {
7756 /* need to mark the open owner busy during the RPC */
7757 if ((error = nfs_open_owner_set_busy(noop, thd))) {
7758 goto nfsmout;
7759 }
7760 noopbusy = 1;
7761 }
7762
7763 /*
7764 * We'd like to get updated post-open/lookup attributes for the
7765 * directory and we may also want to prefetch some data via READ.
7766 * We'd like the READ results to be last so that we can leave the
7767 * data in the mbufs until the end.
7768 *
7769 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7770 */
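/*
 * In the maximal case (no cached attrdir, open, and prefetch), the compound
 * built below works out to 12 operations:
 *
 *   PUTFH(np), OPENATTR(create), GETATTR(FH)   get/create the attrdir
 *   OPEN, GETATTR(FH)                          create/open the attribute
 *   SAVEFH, PUTFH(np), OPENATTR, GETATTR       updated attrdir attributes
 *   RESTOREFH, NVERIFY(size == 0), READ        prefetch (the NVERIFY aborts
 *                                              the READ if the file is empty)
 */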
7771 numops = 5;
7772 if (!hadattrdir) {
7773 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7774 }
7775 if (prefetch) {
7776 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7777 }
7778 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7779 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7780 if (hadattrdir) {
7781 numops--;
7782 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7783 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7784 } else {
7785 numops--;
7786 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7787 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7788 numops--;
7789 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7790 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7791 numops--;
7792 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7793 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7794 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7795 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7796 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7797 }
7798 if (open) {
7799 numops--;
7800 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7801 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7802 nfsm_chain_add_32(error, &nmreq, accessMode);
7803 nfsm_chain_add_32(error, &nmreq, denyMode);
7804 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7805 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7806 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7807 nfsm_chain_add_32(error, &nmreq, create);
7808 if (create) {
7809 nfsm_chain_add_32(error, &nmreq, guarded);
7810 VATTR_INIT(&vattr);
7811 if (truncate) {
7812 VATTR_SET(&vattr, va_data_size, 0);
7813 }
7814 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7815 }
7816 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7817 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7818 } else {
7819 numops--;
7820 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7821 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7822 }
7823 numops--;
7824 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7825 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7826 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7827 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7828 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7829 if (prefetch) {
7830 numops--;
7831 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7832 }
7833 if (hadattrdir) {
7834 numops--;
7835 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7836 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7837 } else {
7838 numops--;
7839 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7840 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7841 numops--;
7842 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7843 nfsm_chain_add_32(error, &nmreq, 0);
7844 }
7845 numops--;
7846 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7847 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7848 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7849 if (prefetch) {
7850 numops--;
7851 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7852 numops--;
7853 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7854 VATTR_INIT(&vattr);
7855 VATTR_SET(&vattr, va_data_size, 0);
7856 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7857 numops--;
7858 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7859 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7860 nfsm_chain_add_64(error, &nmreq, 0);
7861 nfsm_chain_add_32(error, &nmreq, rlen);
7862 }
7863 nfsm_chain_build_done(error, &nmreq);
7864 nfsm_assert(error, (numops == 0), EPROTO);
7865 nfsmout_if(error);
7866 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7867 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7868 if (!error) {
7869 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7870 }
7871
7872 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
7873 error = adlockerror;
7874 }
7875 savedxid = xid;
7876 nfsm_chain_skip_tag(error, &nmrep);
7877 nfsm_chain_get_32(error, &nmrep, numops);
7878 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7879 if (!hadattrdir) {
7880 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7881 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7882 nfsmout_if(error);
7883 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7884 nfsmout_if(error);
7885 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7886 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7887 /* (re)allocate attrdir fh buffer */
7888 if (np->n_attrdirfh) {
7889 FREE(np->n_attrdirfh, M_TEMP);
7890 }
7891 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7892 }
7893 if (np->n_attrdirfh) {
7894 /* remember the attrdir fh in the node */
7895 *np->n_attrdirfh = fh.fh_len;
7896 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7897 /* create busied node for attrdir */
7898 struct componentname cn;
7899 bzero(&cn, sizeof(cn));
7900 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7901 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7902 cn.cn_nameiop = LOOKUP;
7903 // XXX can't set parent correctly (to np) yet
7904 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7905 if (!error) {
7906 adlockerror = 0;
7907 /* set the node busy */
7908 SET(adnp->n_flag, NBUSY);
7909 adbusyerror = 0;
7910 }
7911 /* if no adnp, oh well... */
7912 error = 0;
7913 }
7914 }
7915 NVATTR_CLEANUP(&nvattr);
7916 fh.fh_len = 0;
7917 }
7918 if (open) {
7919 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7920 nfs_owner_seqid_increment(noop, NULL, error);
7921 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7922 nfsm_chain_check_change_info(error, &nmrep, adnp);
7923 nfsm_chain_get_32(error, &nmrep, rflags);
7924 bmlen = NFS_ATTR_BITMAP_LEN;
7925 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7926 nfsm_chain_get_32(error, &nmrep, delegation);
7927 if (!error) {
7928 switch (delegation) {
7929 case NFS_OPEN_DELEGATE_NONE:
7930 break;
7931 case NFS_OPEN_DELEGATE_READ:
7932 case NFS_OPEN_DELEGATE_WRITE:
7933 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7934 nfsm_chain_get_32(error, &nmrep, recall);
7935 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // skip (don't parse) the space limit values XXX
7936 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7937 }
7938 /* if we have any trouble accepting the ACE, just invalidate it */
7939 ace_type = ace_flags = ace_mask = len = 0;
7940 nfsm_chain_get_32(error, &nmrep, ace_type);
7941 nfsm_chain_get_32(error, &nmrep, ace_flags);
7942 nfsm_chain_get_32(error, &nmrep, ace_mask);
7943 nfsm_chain_get_32(error, &nmrep, len);
7944 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7945 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7946 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7947 if (!error && (len >= slen)) {
7948 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
7949 if (s) {
7950 slen = len + 1;
7951 } else {
7952 ace.ace_flags = 0;
7953 }
7954 }
7955 if (s) {
7956 nfsm_chain_get_opaque(error, &nmrep, len, s);
7957 } else {
7958 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
7959 }
7960 if (!error && s) {
7961 s[len] = '\0';
7962 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
7963 ace.ace_flags = 0;
7964 }
7965 }
7966 if (error || !s) {
7967 ace.ace_flags = 0;
7968 }
7969 if (s && (s != sbuf)) {
7970 FREE(s, M_TEMP);
7971 }
7972 break;
7973 default:
7974 error = EBADRPC;
7975 break;
7976 }
7977 }
7978 /* At this point if we have no error, the object was created/opened. */
7979 open_error = error;
7980 } else {
7981 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
7982 }
7983 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7984 nfsmout_if(error);
7985 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7986 nfsmout_if(error);
7987 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7988 error = EIO;
7989 goto nfsmout;
7990 }
7991 if (prefetch) {
7992 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7993 }
7994 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7995 if (!hadattrdir) {
7996 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7997 }
7998 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7999 nfsmout_if(error);
8000 xid = savedxid;
8001 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8002 nfsmout_if(error);
8003
8004 if (open) {
8005 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8006 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8007 }
8008 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8009 if (adnp) {
8010 nfs_node_unlock(adnp);
8011 adlockerror = ENOENT;
8012 }
8013 NVATTR_CLEANUP(&nvattr);
8014 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
8015 nfsmout_if(error);
8016 savedxid = xid;
8017 if ((adlockerror = nfs_node_lock(adnp))) {
8018 error = adlockerror;
8019 }
8020 }
8021 }
8022
8023 nfsmout:
8024 if (open && adnp && !adlockerror) {
8025 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8026 adnp->n_flag &= ~NNEGNCENTRIES;
8027 cache_purge_negatives(NFSTOV(adnp));
8028 }
8029 adnp->n_flag |= NMODIFIED;
8030 nfs_node_unlock(adnp);
8031 adlockerror = ENOENT;
8032 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8033 }
8034 if (adnp && !adlockerror && (error == ENOENT) &&
8035 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8036 /* add a negative entry in the name cache */
8037 cache_enter(NFSTOV(adnp), NULL, cnp);
8038 adnp->n_flag |= NNEGNCENTRIES;
8039 }
8040 if (adnp && !adlockerror) {
8041 nfs_node_unlock(adnp);
8042 adlockerror = ENOENT;
8043 }
8044 if (!error && !anp && fh.fh_len) {
8045 /* create the vnode with the filehandle and attributes */
8046 xid = savedxid;
8047 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
8048 if (!error) {
8049 *anpp = anp;
8050 nfs_node_unlock(anp);
8051 }
8052 if (!error && open) {
8053 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8054 /* After we have a node, add our open file struct to the node */
8055 nofp = newnofp;
8056 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8057 if (error) {
8058 /* This shouldn't happen, because we passed in a new nofp to use. */
8059 printf("nfs_open_file_find_internal failed! %d\n", error);
8060 nofp = NULL;
8061 } else if (nofp != newnofp) {
8062 /*
8063 * Hmm... an open file struct already exists.
8064 * Mark the existing one busy and merge our open into it.
8065 * Then destroy the one we created.
8066 * Note: there's no chance of an open conflict because the
8067 * open has already been granted.
8068 */
8069 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8070 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8071 nofp->nof_stateid = newnofp->nof_stateid;
8072 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8073 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8074 }
8075 nfs_open_file_clear_busy(newnofp);
8076 nfs_open_file_destroy(newnofp);
8077 newnofp = NULL;
8078 }
8079 if (!error) {
8080 newnofp = NULL;
8081 nofpbusyerror = 0;
8082 /* mark the node as holding a create-initiated open */
8083 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8084 nofp->nof_creator = current_thread();
8085 if (nofpp) {
8086 *nofpp = nofp;
8087 }
8088 }
8089 }
8090 }
8091 NVATTR_CLEANUP(&nvattr);
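/*
 * Delegation handling: if the open returned a read or write delegation,
 * either record it on the node (and add the node to the mount's delegation
 * list) or, if we hit an error or the server asked for an immediate recall,
 * return the delegation right away.
 */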
8092 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8093 if (!error && anp && !recall) {
8094 /* stuff the delegation state in the node */
8095 lck_mtx_lock(&anp->n_openlock);
8096 anp->n_openflags &= ~N_DELEG_MASK;
8097 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8098 anp->n_dstateid = dstateid;
8099 anp->n_dace = ace;
8100 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8101 lck_mtx_lock(&nmp->nm_lock);
8102 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8103 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8104 }
8105 lck_mtx_unlock(&nmp->nm_lock);
8106 }
8107 lck_mtx_unlock(&anp->n_openlock);
8108 } else {
8109 /* give the delegation back */
8110 if (anp) {
8111 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
8112 /* update delegation state and return it */
8113 lck_mtx_lock(&anp->n_openlock);
8114 anp->n_openflags &= ~N_DELEG_MASK;
8115 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8116 anp->n_dstateid = dstateid;
8117 anp->n_dace = ace;
8118 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8119 lck_mtx_lock(&nmp->nm_lock);
8120 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8121 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8122 }
8123 lck_mtx_unlock(&nmp->nm_lock);
8124 }
8125 lck_mtx_unlock(&anp->n_openlock);
8126 /* don't need to send a separate delegreturn for fh */
8127 fh.fh_len = 0;
8128 }
8129 /* return anp's current delegation */
8130 nfs4_delegation_return(anp, 0, thd, cred);
8131 }
8132 if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
8133 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
8134 }
8135 }
8136 }
8137 if (open) {
8138 if (newnofp) {
8139 /* need to cleanup our temporary nofp */
8140 nfs_open_file_clear_busy(newnofp);
8141 nfs_open_file_destroy(newnofp);
8142 newnofp = NULL;
8143 } else if (nofp && !nofpbusyerror) {
8144 nfs_open_file_clear_busy(nofp);
8145 nofpbusyerror = ENOENT;
8146 }
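/*
 * A nonzero return from nfs_mount_state_in_use_end() indicates that state
 * recovery ran while we were busy, so drop everything we set up and redo
 * the open from the top.
 */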
8147 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8148 inuse = 0;
8149 nofp = newnofp = NULL;
8150 rflags = delegation = recall = eof = rlen = retlen = 0;
8151 ace.ace_flags = 0;
8152 s = sbuf;
8153 slen = sizeof(sbuf);
8154 nfsm_chain_cleanup(&nmreq);
8155 nfsm_chain_cleanup(&nmrep);
8156 if (anp) {
8157 vnode_put(NFSTOV(anp));
8158 *anpp = anp = NULL;
8159 }
8160 hadattrdir = (adnp != NULL);
8161 if (noopbusy) {
8162 nfs_open_owner_clear_busy(noop);
8163 noopbusy = 0;
8164 }
8165 goto restart;
8166 }
8167 if (noop) {
8168 if (noopbusy) {
8169 nfs_open_owner_clear_busy(noop);
8170 noopbusy = 0;
8171 }
8172 nfs_open_owner_rele(noop);
8173 }
8174 }
8175 if (!error && prefetch && nmrep.nmc_mhead) {
8176 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8177 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8178 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8179 nfsm_chain_get_32(error, &nmrep, eof);
8180 nfsm_chain_get_32(error, &nmrep, retlen);
8181 if (!error && anp) {
8182 /*
8183 * There can be one problem with doing the prefetch.
8184 * Because we don't have the node before we start the RPC, we
8185 * can't have the buffer busy while the READ is performed.
8186 * So there is a chance that other I/O occurred on the same
8187 * range of data while we were performing this RPC. If that
8188 * happens, then it's possible the data we have in the READ
8189 * response is no longer up to date.
8190 * Once we have the node and the buffer, we need to make sure
8191 * that there's no chance we could be putting stale data in
8192 * the buffer.
8193 * So, we check if the range read is dirty or if any I/O may
8194 * have occurred on it while we were performing our RPC.
8195 */
8196 struct nfsbuf *bp = NULL;
8197 int lastpg;
8198 uint32_t pagemask;
8199
8200 retlen = MIN(retlen, rlen);
8201
8202 /* check if node needs size update or invalidation */
8203 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8204 nfs_data_update_size(anp, 0);
8205 }
8206 if (!(error = nfs_node_lock(anp))) {
8207 if (anp->n_flag & NNEEDINVALIDATE) {
8208 anp->n_flag &= ~NNEEDINVALIDATE;
8209 nfs_node_unlock(anp);
8210 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8211 if (!error) { /* let's play it safe and just drop the data */
8212 error = EIO;
8213 }
8214 } else {
8215 nfs_node_unlock(anp);
8216 }
8217 }
8218
8219 /* calculate page mask for the range of data read */
8220 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
8221 pagemask = ((1 << (lastpg + 1)) - 1);
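/* e.g. with 4K pages: retlen 4096 -> lastpg 0 -> pagemask 0x1,
 *      retlen 8192 -> lastpg 1 -> pagemask 0x3 */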
8222
8223 if (!error) {
8224 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8225 }
8226 /* don't save the data if dirty or potential I/O conflict */
8227 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
8228 timevalcmp(&anp->n_lastio, &now, <)) {
8229 OSAddAtomic64(1, &nfsstats.read_bios);
8230 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8231 SET(bp->nb_flags, NB_READ);
8232 NFS_BUF_MAP(bp);
8233 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8234 if (error) {
8235 bp->nb_error = error;
8236 SET(bp->nb_flags, NB_ERROR);
8237 } else {
8238 bp->nb_offio = 0;
8239 bp->nb_endio = rlen;
8240 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8241 bp->nb_endio = retlen;
8242 }
8243 if (eof || (retlen == 0)) {
8244 /* zero out the remaining data (up to EOF) */
8245 off_t rpcrem, eofrem, rem;
8246 rpcrem = (rlen - retlen);
8247 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8248 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8249 if (rem > 0) {
8250 bzero(bp->nb_data + retlen, rem);
8251 }
8252 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8253 /* ugh... short read ... just invalidate for now... */
8254 SET(bp->nb_flags, NB_INVAL);
8255 }
8256 }
8257 nfs_buf_read_finish(bp);
8258 microuptime(&anp->n_lastio);
8259 }
8260 if (bp) {
8261 nfs_buf_release(bp, 1);
8262 }
8263 }
8264 error = 0; /* ignore any transient error in processing the prefetch */
8265 }
8266 if (adnp && !adbusyerror) {
8267 nfs_node_clear_busy(adnp);
8268 adbusyerror = ENOENT;
8269 }
8270 if (!busyerror) {
8271 nfs_node_clear_busy(np);
8272 busyerror = ENOENT;
8273 }
8274 if (adnp) {
8275 vnode_put(NFSTOV(adnp));
8276 }
8277 if (error && *anpp) {
8278 vnode_put(NFSTOV(*anpp));
8279 *anpp = NULL;
8280 }
8281 nfsm_chain_cleanup(&nmreq);
8282 nfsm_chain_cleanup(&nmrep);
8283 return error;
8284 }
8285
8286 /*
8287 * Remove a named attribute.
8288 */
8289 int
8290 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8291 {
8292 nfsnode_t adnp = NULL;
8293 struct nfsmount *nmp;
8294 struct componentname cn;
8295 struct vnop_remove_args vra;
8296 int error, putanp = 0;
8297
8298 nmp = NFSTONMP(np);
8299 if (nfs_mount_gone(nmp)) {
8300 return ENXIO;
8301 }
8302
8303 bzero(&cn, sizeof(cn));
8304 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8305 cn.cn_namelen = strlen(name);
8306 cn.cn_nameiop = DELETE;
8307 cn.cn_flags = 0;
8308
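/*
 * If the caller didn't supply the attribute node, look it up first
 * (without opening it) so we have something to hand to VNOP_REMOVE.
 */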
8309 if (!anp) {
8310 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8311 0, ctx, &anp, NULL);
8312 if ((!error && !anp) || (error == ENOATTR)) {
8313 error = ENOENT;
8314 }
8315 if (error) {
8316 if (anp) {
8317 vnode_put(NFSTOV(anp));
8318 anp = NULL;
8319 }
8320 goto out;
8321 }
8322 putanp = 1;
8323 }
8324
8325 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8326 goto out;
8327 }
8328 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8329 nfs_node_clear_busy(np);
8330 if (!adnp) {
8331 error = ENOENT;
8332 goto out;
8333 }
8334
8335 vra.a_desc = &vnop_remove_desc;
8336 vra.a_dvp = NFSTOV(adnp);
8337 vra.a_vp = NFSTOV(anp);
8338 vra.a_cnp = &cn;
8339 vra.a_flags = 0;
8340 vra.a_context = ctx;
8341 error = nfs_vnop_remove(&vra);
8342 out:
8343 if (adnp) {
8344 vnode_put(NFSTOV(adnp));
8345 }
8346 if (putanp) {
8347 vnode_put(NFSTOV(anp));
8348 }
8349 return error;
8350 }
8351
8352 int
8353 nfs4_vnop_getxattr(
8354 struct vnop_getxattr_args /* {
8355 * struct vnodeop_desc *a_desc;
8356 * vnode_t a_vp;
8357 * const char * a_name;
8358 * uio_t a_uio;
8359 * size_t *a_size;
8360 * int a_options;
8361 * vfs_context_t a_context;
8362 * } */*ap)
8363 {
8364 vfs_context_t ctx = ap->a_context;
8365 struct nfsmount *nmp;
8366 struct nfs_vattr nvattr;
8367 struct componentname cn;
8368 nfsnode_t anp;
8369 int error = 0, isrsrcfork;
8370
8371 nmp = VTONMP(ap->a_vp);
8372 if (nfs_mount_gone(nmp)) {
8373 return ENXIO;
8374 }
8375
8376 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8377 return ENOTSUP;
8378 }
8379 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8380 if (error) {
8381 return error;
8382 }
8383 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8384 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8385 return ENOATTR;
8386 }
8387
8388 bzero(&cn, sizeof(cn));
8389 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8390 cn.cn_namelen = strlen(ap->a_name);
8391 cn.cn_nameiop = LOOKUP;
8392 cn.cn_flags = MAKEENTRY;
8393
8394 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8395 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8396
8397 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8398 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8399 if ((!error && !anp) || (error == ENOENT)) {
8400 error = ENOATTR;
8401 }
8402 if (!error) {
8403 if (ap->a_uio) {
8404 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8405 } else {
8406 *ap->a_size = anp->n_size;
8407 }
8408 }
8409 if (anp) {
8410 vnode_put(NFSTOV(anp));
8411 }
8412 return error;
8413 }
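
/*
 * For reference, a minimal userland sketch exercising the getxattr path
 * above and the setxattr path below (assumes an NFSv4 mount at /mnt/nfs
 * with named attribute support; the path and xattr name are illustrative
 * only):
 *
 *	#include <sys/types.h>
 *	#include <sys/xattr.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[64];
 *		ssize_t len;
 *
 *		// -> nfs4_vnop_setxattr -> nfs4_named_attr_get(CREATE|TRUNCATE)
 *		if (setxattr("/mnt/nfs/file", "test", "hello", 5, 0, 0) != 0)
 *			perror("setxattr");
 *		// -> nfs4_vnop_getxattr -> nfs4_named_attr_get(PREFETCH)
 *		len = getxattr("/mnt/nfs/file", "test", buf, sizeof(buf), 0, 0);
 *		if (len >= 0)
 *			printf("%.*s\n", (int)len, buf);
 *		return 0;
 *	}
 */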
8414
8415 int
8416 nfs4_vnop_setxattr(
8417 struct vnop_setxattr_args /* {
8418 * struct vnodeop_desc *a_desc;
8419 * vnode_t a_vp;
8420 * const char * a_name;
8421 * uio_t a_uio;
8422 * int a_options;
8423 * vfs_context_t a_context;
8424 * } */*ap)
8425 {
8426 vfs_context_t ctx = ap->a_context;
8427 int options = ap->a_options;
8428 uio_t uio = ap->a_uio;
8429 const char *name = ap->a_name;
8430 struct nfsmount *nmp;
8431 struct componentname cn;
8432 nfsnode_t anp = NULL;
8433 int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
8434 #define FINDERINFOSIZE 32
8435 uint8_t finfo[FINDERINFOSIZE];
8436 uint32_t *finfop;
8437 struct nfs_open_file *nofp = NULL;
8438 char uio_buf[UIO_SIZEOF(1)];
8439 uio_t auio;
8440 struct vnop_write_args vwa;
8441
8442 nmp = VTONMP(ap->a_vp);
8443 if (nfs_mount_gone(nmp)) {
8444 return ENXIO;
8445 }
8446
8447 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8448 return ENOTSUP;
8449 }
8450
8451 if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
8452 return EINVAL;
8453 }
8454
8455 /* XXX limitation based on need to back up uio on short write */
8456 if (uio_iovcnt(uio) > 1) {
8457 printf("nfs4_vnop_setxattr: iovcnt > 1\n");
8458 return EINVAL;
8459 }
8460
8461 bzero(&cn, sizeof(cn));
8462 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8463 cn.cn_namelen = strlen(name);
8464 cn.cn_nameiop = CREATE;
8465 cn.cn_flags = MAKEENTRY;
8466
8467 isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
8468 isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8469 if (!isrsrcfork) {
8470 uio_setoffset(uio, 0);
8471 }
8472 if (isfinderinfo) {
8473 if (uio_resid(uio) != sizeof(finfo)) {
8474 return ERANGE;
8475 }
8476 error = uiomove((char*)&finfo, sizeof(finfo), uio);
8477 if (error) {
8478 return error;
8479 }
8480 /* setting a FinderInfo of all zeroes means remove the FinderInfo */
8481 empty = 1;
8482 for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
8483 if (finfop[i]) {
8484 empty = 0;
8485 break;
8486 }
8487 }
8488 if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
8489 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
8490 if (error == ENOENT) {
8491 error = 0;
8492 }
8493 return error;
8494 }
8495 /* first, let's see if we get a create/replace error */
8496 }
8497
8498 /*
8499 * create/open the xattr
8500 *
8501 * We need to make sure not to create it if XATTR_REPLACE.
8502 * For all xattrs except the resource fork, we also want to
8503 * truncate the xattr to remove any current data. We'll do
8504 * that by setting the size to 0 on create/open.
8505 */
8506 flags = 0;
8507 if (!(options & XATTR_REPLACE)) {
8508 flags |= NFS_GET_NAMED_ATTR_CREATE;
8509 }
8510 if (options & XATTR_CREATE) {
8511 flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
8512 }
8513 if (!isrsrcfork) {
8514 flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
8515 }
8516
8517 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8518 flags, ctx, &anp, &nofp);
8519 if (!error && !anp) {
8520 error = ENOATTR;
8521 }
8522 if (error) {
8523 goto out;
8524 }
8525 /* grab the open state from the get/create/open */
8526 if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
8527 nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
8528 nofp->nof_creator = NULL;
8529 nfs_open_file_clear_busy(nofp);
8530 }
8531
8532 /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
8533 if (isfinderinfo && empty) {
8534 goto doclose;
8535 }
8536
8537 /*
8538 * Write the data out and flush.
8539 *
8540 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
8541 */
8542 vwa.a_desc = &vnop_write_desc;
8543 vwa.a_vp = NFSTOV(anp);
8544 vwa.a_uio = NULL;
8545 vwa.a_ioflag = 0;
8546 vwa.a_context = ctx;
8547 if (isfinderinfo) {
8548 auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
8549 uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
8550 vwa.a_uio = auio;
8551 } else if (uio_resid(uio) > 0) {
8552 vwa.a_uio = uio;
8553 }
8554 if (vwa.a_uio) {
8555 error = nfs_vnop_write(&vwa);
8556 if (!error) {
8557 error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
8558 }
8559 }
8560 doclose:
8561 /* Close the xattr. */
8562 if (nofp) {
8563 int busyerror = nfs_open_file_set_busy(nofp, NULL);
8564 closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
8565 if (!busyerror) {
8566 nfs_open_file_clear_busy(nofp);
8567 }
8568 }
8569 if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
8570 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
8571 if (error == ENOENT) {
8572 error = 0;
8573 }
8574 }
8575 if (!error) {
8576 error = closeerror;
8577 }
8578 out:
8579 if (anp) {
8580 vnode_put(NFSTOV(anp));
8581 }
8582 if (error == ENOENT) {
8583 error = ENOATTR;
8584 }
8585 return error;
8586 }
8587
8588 int
8589 nfs4_vnop_removexattr(
8590 struct vnop_removexattr_args /* {
8591 * struct vnodeop_desc *a_desc;
8592 * vnode_t a_vp;
8593 * const char * a_name;
8594 * int a_options;
8595 * vfs_context_t a_context;
8596 * } */*ap)
8597 {
8598 struct nfsmount *nmp = VTONMP(ap->a_vp);
8599 int error;
8600
8601 if (nfs_mount_gone(nmp)) {
8602 return ENXIO;
8603 }
8604 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8605 return ENOTSUP;
8606 }
8607
8608 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
8609 if (error == ENOENT) {
8610 error = ENOATTR;
8611 }
8612 return error;
8613 }
8614
8615 int
8616 nfs4_vnop_listxattr(
8617 struct vnop_listxattr_args /* {
8618 * struct vnodeop_desc *a_desc;
8619 * vnode_t a_vp;
8620 * uio_t a_uio;
8621 * size_t *a_size;
8622 * int a_options;
8623 * vfs_context_t a_context;
8624 * } */*ap)
8625 {
8626 vfs_context_t ctx = ap->a_context;
8627 nfsnode_t np = VTONFS(ap->a_vp);
8628 uio_t uio = ap->a_uio;
8629 nfsnode_t adnp = NULL;
8630 struct nfsmount *nmp;
8631 int error, done, i;
8632 struct nfs_vattr nvattr;
8633 uint64_t cookie, nextcookie, lbn = 0;
8634 struct nfsbuf *bp = NULL;
8635 struct nfs_dir_buf_header *ndbhp;
8636 struct direntry *dp;
8637
8638 nmp = VTONMP(ap->a_vp);
8639 if (nfs_mount_gone(nmp)) {
8640 return ENXIO;
8641 }
8642
8643 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8644 return ENOTSUP;
8645 }
8646
8647 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
8648 if (error) {
8649 return error;
8650 }
8651 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8652 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8653 return 0;
8654 }
8655
8656 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8657 return error;
8658 }
8659 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8660 nfs_node_clear_busy(np);
8661 if (!adnp) {
8662 goto out;
8663 }
8664
8665 if ((error = nfs_node_lock(adnp))) {
8666 goto out;
8667 }
8668
8669 if (adnp->n_flag & NNEEDINVALIDATE) {
8670 adnp->n_flag &= ~NNEEDINVALIDATE;
8671 nfs_invaldir(adnp);
8672 nfs_node_unlock(adnp);
8673 error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8674 if (!error) {
8675 error = nfs_node_lock(adnp);
8676 }
8677 if (error) {
8678 goto out;
8679 }
8680 }
8681
8682 /*
8683 * check for need to invalidate when (re)starting at beginning
8684 */
8685 if (adnp->n_flag & NMODIFIED) {
8686 nfs_invaldir(adnp);
8687 nfs_node_unlock(adnp);
8688 if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
8689 goto out;
8690 }
8691 } else {
8692 nfs_node_unlock(adnp);
8693 }
8694 /* nfs_getattr() will check changed and purge caches */
8695 if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
8696 goto out;
8697 }
8698
8699 if (uio && (uio_resid(uio) == 0)) {
8700 goto out;
8701 }
8702
8703 done = 0;
8704 nextcookie = lbn = 0;
8705
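/*
 * Walk the attribute directory NFS_DIRBLKSIZ bytes at a time: each pass
 * maps the last cookie seen to the logical block (lbn) of the next
 * directory buffer and copies out any non-protected names.
 */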
8706 while (!error && !done) {
8707 OSAddAtomic64(1, &nfsstats.biocache_readdirs);
8708 cookie = nextcookie;
8709 getbuffer:
8710 error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
8711 if (error) {
8712 goto out;
8713 }
8714 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
8715 if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
8716 if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
8717 ndbhp->ndbh_flags = 0;
8718 ndbhp->ndbh_count = 0;
8719 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
8720 ndbhp->ndbh_ncgen = adnp->n_ncgen;
8721 }
8722 error = nfs_buf_readdir(bp, ctx);
8723 if (error == NFSERR_DIRBUFDROPPED) {
8724 goto getbuffer;
8725 }
8726 if (error) {
8727 nfs_buf_release(bp, 1);
8728 }
8729 if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
8730 if (!nfs_node_lock(adnp)) {
8731 nfs_invaldir(adnp);
8732 nfs_node_unlock(adnp);
8733 }
8734 nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
8735 if (error == NFSERR_BAD_COOKIE) {
8736 error = ENOENT;
8737 }
8738 }
8739 if (error) {
8740 goto out;
8741 }
8742 }

        /* go through all the entries copying/counting */
        dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
        for (i = 0; i < ndbhp->ndbh_count; i++) {
            if (!xattr_protected(dp->d_name)) {
                if (uio == NULL) {
                    *ap->a_size += dp->d_namlen + 1;
                } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
                    error = ERANGE;
                } else {
                    error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
                    if (error && (error != EFAULT)) {
                        error = ERANGE;
                    }
                }
            }
            nextcookie = dp->d_seekoff;
            dp = NFS_DIRENTRY_NEXT(dp);
        }

        if (i == ndbhp->ndbh_count) {
            /* hit end of buffer, move to next buffer */
            lbn = nextcookie;
            /* if we also hit EOF, we're done */
            if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
                done = 1;
            }
        }
        if (!error && !done && (nextcookie == cookie)) {
            printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
            error = EIO;
        }
        nfs_buf_release(bp, 1);
    }
out:
    if (adnp) {
        vnode_put(NFSTOV(adnp));
    }
    return error;
}
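
/*
 * A minimal userspace sketch (not part of this file) of the two-pass
 * convention the vnop above serves, assuming the usual listxattr(2)
 * calling convention on macOS: a NULL buffer sizes the name list (the
 * uio == NULL path above), then a second call copies the names.
 * Error handling elided.
 *
 *     ssize_t len = listxattr(path, NULL, 0, XATTR_NOFOLLOW);
 *     char *names = malloc(len);
 *     len = listxattr(path, names, len, XATTR_NOFOLLOW);
 *     // "names" now holds the NUL-separated xattr names
 */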

#if NAMEDSTREAMS
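/*
 * Get a named stream: look up the named attribute of the given name on
 * the file and, if it exists, hand back the vnode for it.
 */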
int
nfs4_vnop_getnamedstream(
    struct vnop_getnamedstream_args /* {
 * struct vnodeop_desc *a_desc;
 * vnode_t a_vp;
 * vnode_t *a_svpp;
 * const char *a_name;
 * enum nsoperation a_operation;
 * int a_flags;
 * vfs_context_t a_context;
 * } */*ap)
{
    vfs_context_t ctx = ap->a_context;
    struct nfsmount *nmp;
    struct nfs_vattr nvattr;
    struct componentname cn;
    nfsnode_t anp;
    int error = 0;

    nmp = VTONMP(ap->a_vp);
    if (nfs_mount_gone(nmp)) {
        return ENXIO;
    }

    if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
        return ENOTSUP;
    }
    error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
    if (error) {
        return error;
    }
    if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
        !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
        return ENOATTR;
    }

    bzero(&cn, sizeof(cn));
    cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
    cn.cn_namelen = strlen(ap->a_name);
    cn.cn_nameiop = LOOKUP;
    cn.cn_flags = MAKEENTRY;

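    /*
     * Look up the named attribute without opening it
     * (NFS_OPEN_SHARE_ACCESS_NONE); we only want its vnode here.
     */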
    error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
        0, ctx, &anp, NULL);
    if ((!error && !anp) || (error == ENOENT)) {
        error = ENOATTR;
    }
    if (!error && anp) {
        *ap->a_svpp = NFSTOV(anp);
    } else if (anp) {
        vnode_put(NFSTOV(anp));
    }
    return error;
}

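/*
 * Make a named stream: create the named attribute of the given name on
 * the file, open it for read/write, and return the vnode for it.
 */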
int
nfs4_vnop_makenamedstream(
    struct vnop_makenamedstream_args /* {
 * struct vnodeop_desc *a_desc;
 * vnode_t *a_svpp;
 * vnode_t a_vp;
 * const char *a_name;
 * int a_flags;
 * vfs_context_t a_context;
 * } */*ap)
{
    vfs_context_t ctx = ap->a_context;
    struct nfsmount *nmp;
    struct componentname cn;
    nfsnode_t anp;
    int error = 0;

    nmp = VTONMP(ap->a_vp);
    if (nfs_mount_gone(nmp)) {
        return ENXIO;
    }

    if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
        return ENOTSUP;
    }

    bzero(&cn, sizeof(cn));
    cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
    cn.cn_namelen = strlen(ap->a_name);
    cn.cn_nameiop = CREATE;
    cn.cn_flags = MAKEENTRY;

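    /*
     * Create the named attribute (NFS_GET_NAMED_ATTR_CREATE), opening it
     * for both read and write (NFS_OPEN_SHARE_ACCESS_BOTH) so the caller
     * can use the stream immediately.
     */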
    error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
        NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
    if ((!error && !anp) || (error == ENOENT)) {
        error = ENOATTR;
    }
    if (!error && anp) {
        *ap->a_svpp = NFSTOV(anp);
    } else if (anp) {
        vnode_put(NFSTOV(anp));
    }
    return error;
}

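/*
 * Remove a named stream: delete the named attribute of the given name
 * from the file.
 */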
int
nfs4_vnop_removenamedstream(
    struct vnop_removenamedstream_args /* {
 * struct vnodeop_desc *a_desc;
 * vnode_t a_vp;
 * vnode_t a_svp;
 * const char *a_name;
 * int a_flags;
 * vfs_context_t a_context;
 * } */*ap)
{
    struct nfsmount *nmp = VTONMP(ap->a_vp);
    nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
    nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;

    if (nfs_mount_gone(nmp)) {
        return ENXIO;
    }

    /*
     * Given that a_svp is already a named stream, checking for named
     * attribute support here is largely redundant.
     */
    if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
        return ENOTSUP;
    }

    return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
}

#endif