]> git.saurik.com Git - apple/xnu.git/blob - bsd/nfs/nfs4_vnops.c
xnu-6153.121.1.tar.gz
[apple/xnu.git] / bsd / nfs / nfs4_vnops.c
1 /*
2 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <nfs/nfs_conf.h>
30 #if CONFIG_NFS_CLIENT
31
32 /*
33 * vnode op calls for NFS version 4
34 */
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/resourcevar.h>
39 #include <sys/proc_internal.h>
40 #include <sys/kauth.h>
41 #include <sys/mount_internal.h>
42 #include <sys/malloc.h>
43 #include <sys/kpi_mbuf.h>
44 #include <sys/conf.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/dirent.h>
47 #include <sys/fcntl.h>
48 #include <sys/lockf.h>
49 #include <sys/ubc_internal.h>
50 #include <sys/attr.h>
51 #include <sys/signalvar.h>
52 #include <sys/uio_internal.h>
53 #include <sys/xattr.h>
54 #include <sys/paths.h>
55
56 #include <vfs/vfs_support.h>
57
58 #include <sys/vm.h>
59
60 #include <sys/time.h>
61 #include <kern/clock.h>
62 #include <libkern/OSAtomic.h>
63
64 #include <miscfs/fifofs/fifo.h>
65 #include <miscfs/specfs/specdev.h>
66
67 #include <nfs/rpcv2.h>
68 #include <nfs/nfsproto.h>
69 #include <nfs/nfs.h>
70 #include <nfs/nfsnode.h>
71 #include <nfs/nfs_gss.h>
72 #include <nfs/nfsmount.h>
73 #include <nfs/nfs_lock.h>
74 #include <nfs/xdr_subs.h>
75 #include <nfs/nfsm_subs.h>
76
77 #include <net/if.h>
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <vm/vm_kern.h>
81
82 #include <kern/task.h>
83 #include <kern/sched_prim.h>
84
85 #if CONFIG_NFS4
86 int
87 nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx)
88 {
89 int error = 0, lockerror = ENOENT, status, numops, slot;
90 u_int64_t xid;
91 struct nfsm_chain nmreq, nmrep;
92 struct timeval now;
93 uint32_t access_result = 0, supported = 0, missing;
94 struct nfsmount *nmp = NFSTONMP(np);
95 int nfsvers = nmp->nm_vers;
96 uid_t uid;
97 struct nfsreq_secinfo_args si;
98
99 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
100 return 0;
101 }
102
103 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
104 nfsm_chain_null(&nmreq);
105 nfsm_chain_null(&nmrep);
106
107 // PUTFH, ACCESS, GETATTR
108 numops = 3;
109 nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED);
110 nfsm_chain_add_compound_header(error, &nmreq, "access", nmp->nm_minor_vers, numops);
111 numops--;
112 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
113 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
114 numops--;
115 nfsm_chain_add_32(error, &nmreq, NFS_OP_ACCESS);
116 nfsm_chain_add_32(error, &nmreq, *access);
117 numops--;
118 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
119 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
120 nfsm_chain_build_done(error, &nmreq);
121 nfsm_assert(error, (numops == 0), EPROTO);
122 nfsmout_if(error);
123 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
124 vfs_context_thread(ctx), vfs_context_ucred(ctx),
125 &si, rpcflags, &nmrep, &xid, &status);
126
127 if ((lockerror = nfs_node_lock(np))) {
128 error = lockerror;
129 }
130 nfsm_chain_skip_tag(error, &nmrep);
131 nfsm_chain_get_32(error, &nmrep, numops);
132 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
133 nfsm_chain_op_check(error, &nmrep, NFS_OP_ACCESS);
134 nfsm_chain_get_32(error, &nmrep, supported);
135 nfsm_chain_get_32(error, &nmrep, access_result);
136 nfsmout_if(error);
137 if ((missing = (*access & ~supported))) {
138 /* missing support for something(s) we wanted */
139 if (missing & NFS_ACCESS_DELETE) {
140 /*
141 * If the server doesn't report DELETE (possible
142 * on UNIX systems), we'll assume that it is OK
143 * and just let any subsequent delete action fail
144 * if it really isn't deletable.
145 */
146 access_result |= NFS_ACCESS_DELETE;
147 }
148 }
149 /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */
150 if (nfs_access_dotzfs) {
151 vnode_t dvp = NULLVP;
152 if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */
153 access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE);
154 } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) {
155 access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */
156 }
157 if (dvp != NULLVP) {
158 vnode_put(dvp);
159 }
160 }
161 /* Some servers report DELETE support but erroneously give a denied answer. */
162 if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) {
163 access_result |= NFS_ACCESS_DELETE;
164 }
165 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
166 nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
167 nfsmout_if(error);
168
169 if (nfs_mount_gone(nmp)) {
170 error = ENXIO;
171 }
172 nfsmout_if(error);
173
174 if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) {
175 uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx));
176 } else {
177 uid = kauth_cred_getuid(vfs_context_ucred(ctx));
178 }
179 slot = nfs_node_access_slot(np, uid, 1);
180 np->n_accessuid[slot] = uid;
181 microuptime(&now);
182 np->n_accessstamp[slot] = now.tv_sec;
183 np->n_access[slot] = access_result;
184
185 /* pass back the access returned with this request */
186 *access = np->n_access[slot];
187 nfsmout:
188 if (!lockerror) {
189 nfs_node_unlock(np);
190 }
191 nfsm_chain_cleanup(&nmreq);
192 nfsm_chain_cleanup(&nmrep);
193 return error;
194 }
195
/*
 * NFSv4 GETATTR RPC: fetch attributes for the file handle fhp/fhsize
 * into *nvap.  The mount is taken from mp when given, else from np.
 *
 * Compound sent: PUTFH, GETATTR.
 *
 * flags:
 *   NGA_MONITOR - vnode monitor request; sent with R_RECOVER
 *   NGA_SOFT    - sent with R_SOFT so an unresponsive server errors out
 *   NGA_ACL     - also request the ACL attribute (only if the mount
 *                 reports ACL support)
 *
 * Referral trigger nodes are answered locally with default attributes.
 * *xidp returns the reply's transaction id.
 */
int
nfs4_getattr_rpc(
	nfsnode_t np,
	mount_t mp,
	u_char *fhp,
	size_t fhsize,
	int flags,
	vfs_context_t ctx,
	struct nfs_vattr *nvap,
	u_int64_t *xidp)
{
	struct nfsmount *nmp = mp ? VFSTONFS(mp) : NFSTONMP(np);
	int error = 0, status, nfsvers, numops, rpcflags = 0, acls;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) {
		/* synthesize attributes for the referral trigger instead of going to the server */
		nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL);
		return 0;
	}

	if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */
		rpcflags = R_RECOVER;
	}

	if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */
		rpcflags |= R_SOFT;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "getattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fhp, fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	if ((flags & NGA_ACL) && acls) {
		NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	/* every advertised op must have been emitted */
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, rpcflags, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	nfsmout_if(error);
	if ((flags & NGA_ACL) && acls && !NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we asked for the ACL but didn't get one... assume there isn't one */
		NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_ACL);
		nvap->nva_acl = NULL;
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
274
/*
 * NFSv4 READLINK RPC: read the contents of the symbolic link np into buf.
 *
 * Compound sent: PUTFH, GETATTR, READLINK.
 *
 * On entry *buflenp is the buffer size; on success it is updated to the
 * number of bytes copied.  Link text longer than the buffer is clamped
 * to n_size (if known and smaller) or *buflenp - 1.  Referral trigger
 * nodes return EINVAL.
 */
int
nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, status, numops;
	uint32_t len = 0;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, GETATTR, READLINK
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READLINK);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* node lock protects the cached attributes updated by loadattr below */
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
	nfsm_chain_get_32(error, &nmrep, len);
	nfsmout_if(error);
	if (len >= *buflenp) {
		/* truncate returned link text to fit the caller's buffer */
		if (np->n_size && (np->n_size < *buflenp)) {
			len = np->n_size;
		} else {
			len = *buflenp - 1;
		}
	}
	nfsm_chain_get_opaque(error, &nmrep, len, buf);
	if (!error) {
		*buflenp = len;
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
343
/*
 * Build and dispatch an asynchronous NFSv4 READ request for
 * [offset, offset + len) of np, using the node's current stateid.
 *
 * Compound sent: PUTFH, READ, GETATTR.
 *
 * The in-flight request handle is returned in *reqp and cb carries the
 * completion callback info; pair with nfs4_read_rpc_async_finish().
 * Referral trigger nodes return EINVAL.
 */
int
nfs4_read_rpc_async(
	nfsnode_t np,
	off_t offset,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, READ, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
	/* use the open/lock stateid currently appropriate for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, len);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
396
/*
 * Complete an asynchronous NFSv4 READ started by nfs4_read_rpc_async().
 *
 * Copies up to *lenp bytes of reply data into uio; on return *lenp holds
 * the number of bytes actually transferred and *eofp (if non-NULL) the
 * server's EOF flag (forced on when a zero-length, non-EOF reply is seen).
 * Returns EINPROGRESS if the async request was transparently restarted.
 */
int
nfs4_read_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	uio_t uio,
	size_t *lenp,
	int *eofp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, nfsvers, numops, status, eof = 0;
	size_t retlen = 0;
	u_int64_t xid;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: drop the outstanding request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
	nfsm_chain_get_32(error, &nmrep, eof);
	nfsm_chain_get_32(error, &nmrep, retlen);
	if (!error) {
		/* never hand back more than the caller asked for */
		*lenp = MIN(retlen, *lenp);
		error = nfsm_chain_get_uio(&nmrep, *lenp, uio);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (eofp) {
		if (!eof && !retlen) {
			/* a zero-length reply with no EOF flag still means EOF to us */
			eof = 1;
		}
		*eofp = eof;
	}
	nfsm_chain_cleanup(&nmrep);
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		/* track last I/O time on named-attribute nodes */
		microuptime(&np->n_lastio);
	}
	return error;
}
455
/*
 * Build and dispatch an asynchronous NFSv4 WRITE request for the next
 * len bytes of uio at its current offset, using the node's current
 * stateid.  iomode is the requested stability level; on fully-async
 * mounts it is downgraded to UNSTABLE.
 *
 * Compound sent: PUTFH, WRITE, GETATTR.
 *
 * The in-flight request handle is returned in *reqp; pair with
 * nfs4_write_rpc_async_finish().  Referral trigger nodes return EINVAL.
 */
int
nfs4_write_rpc_async(
	nfsnode_t np,
	uio_t uio,
	size_t len,
	thread_t thd,
	kauth_cred_t cred,
	int iomode,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsmount *nmp;
	mount_t mp;
	int error = 0, nfsvers, numops;
	nfs_stateid stateid;
	struct nfsm_chain nmreq;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/* for async mounts, don't bother sending sync write requests */
	if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		iomode = NFS_WRITE_UNSTABLE;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);

	// PUTFH, WRITE, GETATTR
	numops = 3;
	/* request buffer must also hold the len bytes of write data */
	nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED + len);
	nfsm_chain_add_compound_header(error, &nmreq, "write", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_WRITE);
	/* use the open/lock stateid currently appropriate for this thread/cred */
	nfs_get_stateid(np, thd, cred, &stateid);
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_64(error, &nmreq, uio_offset(uio));
	nfsm_chain_add_32(error, &nmreq, iomode);
	nfsm_chain_add_32(error, &nmreq, len);
	if (!error) {
		error = nfsm_chain_add_uio(&nmreq, uio, len);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
521
/*
 * Complete an asynchronous NFSv4 WRITE started by nfs4_write_rpc_async().
 *
 * On return *rlenp holds the byte count the server wrote, *iomodep the
 * stability level the server committed to, and *wverfp (if non-NULL)
 * the server's write verifier.  The mount's cached write verifier is
 * established or refreshed from the reply so a change can be detected.
 * Returns EINPROGRESS if the async request was transparently restarted;
 * a zero-length write reply yields NFSERR_IO.
 */
int
nfs4_write_rpc_async_finish(
	nfsnode_t np,
	struct nfsreq *req,
	int *iomodep,
	size_t *rlenp,
	uint64_t *wverfp)
{
	struct nfsmount *nmp;
	int error = 0, lockerror = ENOENT, nfsvers, numops, status;
	int committed = NFS_WRITE_FILESYNC;
	size_t rlen = 0;
	u_int64_t xid, wverf;
	mount_t mp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		/* mount is gone: drop the outstanding request */
		nfs_request_async_cancel(req);
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);
	if (error == EINPROGRESS) { /* async request restarted */
		return error;
	}
	/* re-check the mount; it may have been unmounted while we waited */
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
	}
	if (!error && (lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_WRITE);
	nfsm_chain_get_32(error, &nmrep, rlen);
	nfsmout_if(error);
	*rlenp = rlen;
	if (rlen <= 0) {
		error = NFSERR_IO;
	}
	nfsm_chain_get_32(error, &nmrep, committed);
	nfsm_chain_get_64(error, &nmrep, wverf);
	nfsmout_if(error);
	if (wverfp) {
		*wverfp = wverf;
	}
	/* record/refresh the mount's write verifier under the mount lock */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		nmp->nm_verf = wverf;
		nmp->nm_state |= NFSSTA_HASWRITEVERF;
	} else if (nmp->nm_verf != wverf) {
		nmp->nm_verf = wverf;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmrep);
	/* on fully-async mounts, report FILESYNC so callers skip the commit */
	if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async &&
	    ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) {
		committed = NFS_WRITE_FILESYNC;
	}
	*iomodep = committed;
	if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) {
		/* track last I/O time on named-attribute nodes */
		microuptime(&np->n_lastio);
	}
	return error;
}
599
/*
 * NFSv4 REMOVE RPC: delete the directory entry name/namelen in dnp.
 *
 * Compound sent: PUTFH, REMOVE, GETATTR(dir).
 *
 * While the server reports NFSERR_GRACE the whole compound is retried
 * after a short sleep.  The function returns the status of the REMOVE
 * op itself (remove_error); a failure of the trailing GETATTR only
 * invalidates the directory's cached attributes.
 */
int
nfs4_remove_rpc(
	nfsnode_t dnp,
	char *name,
	int namelen,
	thread_t thd,
	kauth_cred_t cred)
{
	int error = 0, lockerror = ENOENT, remove_error = 0, status;
	struct nfsmount *nmp;
	int nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
restart:
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, REMOVE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 17 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "remove", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_REMOVE);
	nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_REMOVE);
	/* capture the REMOVE op's status before the later ops can modify error */
	remove_error = error;
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error && !lockerror) {
		/* couldn't refresh the directory's attributes; toss the cache */
		NATTRINVALIDATE(dnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror) {
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
	}
	if (error == NFSERR_GRACE) {
		/* server is in its grace period; wait a couple seconds and retry */
		tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
		goto restart;
	}

	return remove_error;
}
676
/*
 * NFSv4 RENAME RPC: rename entry fnameptr in directory fdnp to tnameptr
 * in directory tdnp.
 *
 * Compound sent: PUTFH(from), SAVEFH, PUTFH(to), RENAME, GETATTR(to),
 * RESTOREFH, GETATTR(from).
 *
 * Attributes returned for each directory refresh its cache; if either
 * GETATTR fails that directory's cached attributes are invalidated.
 * Both directories are marked NMODIFIED before the locks are dropped.
 * Referral trigger nodes (either directory) return EINVAL.
 */
int
nfs4_rename_rpc(
	nfsnode_t fdnp,
	char *fnameptr,
	int fnamelen,
	nfsnode_t tdnp,
	char *tnameptr,
	int tnamelen,
	vfs_context_t ctx)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsmount *nmp;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(fdnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(FROM), SAVEFH, PUTFH(TO), RENAME, GETATTR(TO), RESTOREFH, GETATTR(FROM)
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 30 * NFSX_UNSIGNED + fnamelen + tnamelen);
	nfsm_chain_add_compound_header(error, &nmreq, "rename", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RENAME);
	nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp);
	nfsm_chain_add_name(error, &nmreq, tnameptr, tnamelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, fdnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	/* lock both directories together for the attribute updates below */
	if ((lockerror = nfs_node_lock2(fdnp, tdnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RENAME);
	nfsm_chain_check_change_info(error, &nmrep, fdnp);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	/* loadattr advances xid; save it so both GETATTRs see the same value */
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(tdnp);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid);
	if (error && !lockerror) {
		NATTRINVALIDATE(fdnp);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		fdnp->n_flag |= NMODIFIED;
		tdnp->n_flag |= NMODIFIED;
		nfs_node_unlock2(fdnp, tdnp);
	}
	return error;
}
774
775 /*
776 * NFS V4 readdir RPC.
777 */
778 int
779 nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx)
780 {
781 struct nfsmount *nmp;
782 int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops;
783 int i, status, more_entries = 1, eof, bp_dropped = 0;
784 uint32_t nmreaddirsize, nmrsize;
785 uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed;
786 uint64_t cookie, lastcookie, xid, savedxid;
787 struct nfsm_chain nmreq, nmrep, nmrepsave;
788 fhandle_t fh;
789 struct nfs_vattr nvattr, *nvattrp;
790 struct nfs_dir_buf_header *ndbhp;
791 struct direntry *dp;
792 char *padstart, padlen;
793 const char *tag;
794 uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN];
795 struct timeval now;
796 struct nfsreq_secinfo_args si;
797
798 nmp = NFSTONMP(dnp);
799 if (nfs_mount_gone(nmp)) {
800 return ENXIO;
801 }
802 nfsvers = nmp->nm_vers;
803 nmreaddirsize = nmp->nm_readdirsize;
804 nmrsize = nmp->nm_rsize;
805 bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES;
806 namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0;
807 rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0;
808 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
809 return EINVAL;
810 }
811 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
812
813 /*
814 * Set up attribute request for entries.
815 * For READDIRPLUS functionality, get everything.
816 * Otherwise, just get what we need for struct direntry.
817 */
818 if (rdirplus) {
819 tag = "readdirplus";
820 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, entry_attrs);
821 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEHANDLE);
822 } else {
823 tag = "readdir";
824 NFS_CLEAR_ATTRIBUTES(entry_attrs);
825 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_TYPE);
826 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_FILEID);
827 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_MOUNTED_ON_FILEID);
828 }
829 NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR);
830
831 /* lock to protect access to cookie verifier */
832 if ((lockerror = nfs_node_lock(dnp))) {
833 return lockerror;
834 }
835
836 /* determine cookie to use, and move dp to the right offset */
837 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
838 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
839 if (ndbhp->ndbh_count) {
840 for (i = 0; i < ndbhp->ndbh_count - 1; i++) {
841 dp = NFS_DIRENTRY_NEXT(dp);
842 }
843 cookie = dp->d_seekoff;
844 dp = NFS_DIRENTRY_NEXT(dp);
845 } else {
846 cookie = bp->nb_lblkno;
847 /* increment with every buffer read */
848 OSAddAtomic64(1, &nfsstats.readdir_bios);
849 }
850 lastcookie = cookie;
851
852 /*
853 * The NFS client is responsible for the "." and ".." entries in the
854 * directory. So, we put them at the start of the first buffer.
855 * Don't bother for attribute directories.
856 */
857 if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) &&
858 !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
859 fh.fh_len = 0;
860 fhlen = rdirplus ? fh.fh_len + 1 : 0;
861 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
862 /* "." */
863 namlen = 1;
864 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
865 if (xlen) {
866 bzero(&dp->d_name[namlen + 1], xlen);
867 }
868 dp->d_namlen = namlen;
869 strlcpy(dp->d_name, ".", namlen + 1);
870 dp->d_fileno = dnp->n_vattr.nva_fileid;
871 dp->d_type = DT_DIR;
872 dp->d_reclen = reclen;
873 dp->d_seekoff = 1;
874 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
875 dp = NFS_DIRENTRY_NEXT(dp);
876 padlen = (char*)dp - padstart;
877 if (padlen > 0) {
878 bzero(padstart, padlen);
879 }
880 if (rdirplus) { /* zero out attributes */
881 bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr));
882 }
883
884 /* ".." */
885 namlen = 2;
886 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
887 if (xlen) {
888 bzero(&dp->d_name[namlen + 1], xlen);
889 }
890 dp->d_namlen = namlen;
891 strlcpy(dp->d_name, "..", namlen + 1);
892 if (dnp->n_parent) {
893 dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid;
894 } else {
895 dp->d_fileno = dnp->n_vattr.nva_fileid;
896 }
897 dp->d_type = DT_DIR;
898 dp->d_reclen = reclen;
899 dp->d_seekoff = 2;
900 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
901 dp = NFS_DIRENTRY_NEXT(dp);
902 padlen = (char*)dp - padstart;
903 if (padlen > 0) {
904 bzero(padstart, padlen);
905 }
906 if (rdirplus) { /* zero out attributes */
907 bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr));
908 }
909
910 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
911 ndbhp->ndbh_count = 2;
912 }
913
914 /*
915 * Loop around doing readdir(plus) RPCs of size nm_readdirsize until
916 * the buffer is full (or we hit EOF). Then put the remainder of the
917 * results in the next buffer(s).
918 */
919 nfsm_chain_null(&nmreq);
920 nfsm_chain_null(&nmrep);
921 while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) {
922 // PUTFH, GETATTR, READDIR
923 numops = 3;
924 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
925 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
926 numops--;
927 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
928 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
929 numops--;
930 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
931 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
932 numops--;
933 nfsm_chain_add_32(error, &nmreq, NFS_OP_READDIR);
934 nfsm_chain_add_64(error, &nmreq, (cookie <= 2) ? 0 : cookie);
935 nfsm_chain_add_64(error, &nmreq, dnp->n_cookieverf);
936 nfsm_chain_add_32(error, &nmreq, nmreaddirsize);
937 nfsm_chain_add_32(error, &nmreq, nmrsize);
938 nfsm_chain_add_bitmap_supported(error, &nmreq, entry_attrs, nmp, dnp);
939 nfsm_chain_build_done(error, &nmreq);
940 nfsm_assert(error, (numops == 0), EPROTO);
941 nfs_node_unlock(dnp);
942 nfsmout_if(error);
943 error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
944
945 if ((lockerror = nfs_node_lock(dnp))) {
946 error = lockerror;
947 }
948
949 savedxid = xid;
950 nfsm_chain_skip_tag(error, &nmrep);
951 nfsm_chain_get_32(error, &nmrep, numops);
952 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
953 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
954 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
955 nfsm_chain_op_check(error, &nmrep, NFS_OP_READDIR);
956 nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf);
957 nfsm_chain_get_32(error, &nmrep, more_entries);
958
959 if (!lockerror) {
960 nfs_node_unlock(dnp);
961 lockerror = ENOENT;
962 }
963 nfsmout_if(error);
964
965 if (rdirplus) {
966 microuptime(&now);
967 if (lastcookie == 0) {
968 dnp->n_rdirplusstamp_sof = now.tv_sec;
969 dnp->n_rdirplusstamp_eof = 0;
970 }
971 }
972
973 /* loop through the entries packing them into the buffer */
974 while (more_entries) {
975 /* Entry: COOKIE, NAME, FATTR */
976 nfsm_chain_get_64(error, &nmrep, cookie);
977 nfsm_chain_get_32(error, &nmrep, namlen);
978 nfsmout_if(error);
979 if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) {
980 /* we've got a big cookie, make sure flag is set */
981 lck_mtx_lock(&nmp->nm_lock);
982 nmp->nm_state |= NFSSTA_BIGCOOKIES;
983 lck_mtx_unlock(&nmp->nm_lock);
984 bigcookies = 1;
985 }
986 /* just truncate names that don't fit in direntry.d_name */
987 if (namlen <= 0) {
988 error = EBADRPC;
989 goto nfsmout;
990 }
991 if (namlen > (sizeof(dp->d_name) - 1)) {
992 skiplen = namlen - sizeof(dp->d_name) + 1;
993 namlen = sizeof(dp->d_name) - 1;
994 } else {
995 skiplen = 0;
996 }
997 /* guess that fh size will be same as parent */
998 fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0;
999 xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0;
1000 attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0;
1001 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1002 space_needed = reclen + attrlen;
1003 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1004 if (space_needed > space_free) {
1005 /*
1006 * We still have entries to pack, but we've
1007 * run out of room in the current buffer.
1008 * So we need to move to the next buffer.
1009 * The block# for the next buffer is the
1010 * last cookie in the current buffer.
1011 */
1012 nextbuffer:
1013 ndbhp->ndbh_flags |= NDB_FULL;
1014 nfs_buf_release(bp, 0);
1015 bp_dropped = 1;
1016 bp = NULL;
1017 error = nfs_buf_get(dnp, lastcookie, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
1018 nfsmout_if(error);
1019 /* initialize buffer */
1020 ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
1021 ndbhp->ndbh_flags = 0;
1022 ndbhp->ndbh_count = 0;
1023 ndbhp->ndbh_entry_end = sizeof(*ndbhp);
1024 ndbhp->ndbh_ncgen = dnp->n_ncgen;
1025 space_free = nfs_dir_buf_freespace(bp, rdirplus);
1026 dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
1027 /* increment with every buffer read */
1028 OSAddAtomic64(1, &nfsstats.readdir_bios);
1029 }
1030 nmrepsave = nmrep;
1031 dp->d_fileno = cookie; /* placeholder */
1032 dp->d_seekoff = cookie;
1033 dp->d_namlen = namlen;
1034 dp->d_reclen = reclen;
1035 dp->d_type = DT_UNKNOWN;
1036 nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name);
1037 nfsmout_if(error);
1038 dp->d_name[namlen] = '\0';
1039 if (skiplen) {
1040 nfsm_chain_adv(error, &nmrep,
1041 nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen));
1042 }
1043 nfsmout_if(error);
1044 nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr;
1045 error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL);
1046 if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) {
1047 /* we do NOT want ACLs returned to us here */
1048 NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL);
1049 if (nvattrp->nva_acl) {
1050 kauth_acl_free(nvattrp->nva_acl);
1051 nvattrp->nva_acl = NULL;
1052 }
1053 }
1054 if (error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_RDATTR_ERROR)) {
1055 /* OK, we may not have gotten all of the attributes but we will use what we can. */
1056 if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
1057 /* set this up to look like a referral trigger */
1058 nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh);
1059 }
1060 error = 0;
1061 }
1062 /* check for more entries after this one */
1063 nfsm_chain_get_32(error, &nmrep, more_entries);
1064 nfsmout_if(error);
1065
1066 /* Skip any "." and ".." entries returned from server. */
1067 /* Also skip any bothersome named attribute entries. */
1068 if (((dp->d_name[0] == '.') && ((namlen == 1) || ((namlen == 2) && (dp->d_name[1] == '.')))) ||
1069 (namedattr && (namlen == 11) && (!strcmp(dp->d_name, "SUNWattr_ro") || !strcmp(dp->d_name, "SUNWattr_rw")))) {
1070 lastcookie = cookie;
1071 continue;
1072 }
1073
1074 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) {
1075 dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type));
1076 }
1077 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) {
1078 dp->d_fileno = nvattrp->nva_fileid;
1079 }
1080 if (rdirplus) {
1081 /* fileid is already in d_fileno, so stash xid in attrs */
1082 nvattrp->nva_fileid = savedxid;
1083 if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1084 fhlen = fh.fh_len + 1;
1085 xlen = fhlen + sizeof(time_t);
1086 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1087 space_needed = reclen + attrlen;
1088 if (space_needed > space_free) {
1089 /* didn't actually have the room... move on to next buffer */
1090 nmrep = nmrepsave;
1091 goto nextbuffer;
1092 }
1093 /* pack the file handle into the record */
1094 dp->d_name[dp->d_namlen + 1] = fh.fh_len;
1095 bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len);
1096 } else {
1097 /* mark the file handle invalid */
1098 fh.fh_len = 0;
1099 fhlen = fh.fh_len + 1;
1100 xlen = fhlen + sizeof(time_t);
1101 reclen = NFS_DIRENTRY_LEN(namlen + xlen);
1102 bzero(&dp->d_name[dp->d_namlen + 1], fhlen);
1103 }
1104 *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec;
1105 dp->d_reclen = reclen;
1106 nfs_rdirplus_update_node_attrs(dnp, dp, &fh, nvattrp, &savedxid);
1107 }
1108 padstart = dp->d_name + dp->d_namlen + 1 + xlen;
1109 ndbhp->ndbh_count++;
1110 lastcookie = cookie;
1111
1112 /* advance to next direntry in buffer */
1113 dp = NFS_DIRENTRY_NEXT(dp);
1114 ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data;
1115 /* zero out the pad bytes */
1116 padlen = (char*)dp - padstart;
1117 if (padlen > 0) {
1118 bzero(padstart, padlen);
1119 }
1120 }
1121 /* Finally, get the eof boolean */
1122 nfsm_chain_get_32(error, &nmrep, eof);
1123 nfsmout_if(error);
1124 if (eof) {
1125 ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF);
1126 nfs_node_lock_force(dnp);
1127 dnp->n_eofcookie = lastcookie;
1128 if (rdirplus) {
1129 dnp->n_rdirplusstamp_eof = now.tv_sec;
1130 }
1131 nfs_node_unlock(dnp);
1132 } else {
1133 more_entries = 1;
1134 }
1135 if (bp_dropped) {
1136 nfs_buf_release(bp, 0);
1137 bp = NULL;
1138 break;
1139 }
1140 if ((lockerror = nfs_node_lock(dnp))) {
1141 error = lockerror;
1142 }
1143 nfsmout_if(error);
1144 nfsm_chain_cleanup(&nmrep);
1145 nfsm_chain_null(&nmreq);
1146 }
1147 nfsmout:
1148 if (bp_dropped && bp) {
1149 nfs_buf_release(bp, 0);
1150 }
1151 if (!lockerror) {
1152 nfs_node_unlock(dnp);
1153 }
1154 nfsm_chain_cleanup(&nmreq);
1155 nfsm_chain_cleanup(&nmrep);
1156 return bp_dropped ? NFSERR_DIRBUFDROPPED : error;
1157 }
1158
/*
 * Send an asynchronous NFSv4 LOOKUP RPC for "name" in directory "dnp".
 *
 * Builds a COMPOUND of PUTFH(dir), GETATTR(dir), LOOKUP/LOOKUPP, GETFH,
 * GETATTR(result) and starts it with nfs_request_async().  The in-flight
 * request is returned via *reqp; the caller collects the reply with
 * nfs4_lookup_rpc_async_finish().
 *
 * A ".." name is sent as LOOKUPP (lookup parent) instead of LOOKUP.
 *
 * Returns 0 on success, else an errno/NFS error.
 */
int
nfs4_lookup_rpc_async(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq **reqp)
{
	int error = 0, isdotdot = 0, nfsvers, numops;
	struct nfsm_chain nmreq;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfsmount *nmp;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* referral trigger directories are not looked up via this path */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
		NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	} else {
		NFSREQ_SECINFO_SET(&si, dnp, dnp->n_fhp, dnp->n_fhsize, name, namelen);
	}

	nfsm_chain_null(&nmreq);

	// PUTFH, GETATTR, LOOKUP(P), GETFH, GETATTR (FH)
	numops = 5;
	nfsm_chain_build_alloc_init(error, &nmreq, 20 * NFSX_UNSIGNED + namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "lookup", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	numops--;
	if (isdotdot) {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUPP);
	} else {
		nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
		nfsm_chain_add_name(error, &nmreq, name, namelen, nmp);
	}
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	/* some ".zfs" directories can't handle being asked for some attributes */
	if ((dnp->n_flag & NISDOTZFS) && !isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
	}
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* fire off the request; completion is handled by the caller via *reqp */
	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	return error;
}
1233
1234
/*
 * Finish an asynchronous NFSv4 LOOKUP RPC started by nfs4_lookup_rpc_async().
 *
 * Parses the COMPOUND reply (PUTFH, GETATTR, LOOKUP/LOOKUPP, GETFH, GETATTR),
 * loading the directory's attributes and returning the looked-up entry's
 * file handle in *fhp and attributes in *nvap (both optional; parsing of the
 * result stops early if either is NULL).  If the target's GETATTR fails with
 * NFSERR_MOVED/NFSERR_INVAL, the result is faked up to look like a referral
 * trigger.
 *
 * On the first successful LOOKUP while the mount still has
 * NFSSTA_NEEDSECINFO set, a SECINFO RPC is also issued to establish the
 * mount's default security flavor.
 */
int
nfs4_lookup_rpc_async_finish(
	nfsnode_t dnp,
	char *name,
	int namelen,
	vfs_context_t ctx,
	struct nfsreq *req,
	u_int64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, lockerror = ENOENT, status, nfsvers, numops, isdotdot = 0;
	/*
	 * NOTE(review): "op" is initialized to NFS_OP_LOOKUP and never changed
	 * (even for the ".." / LOOKUPP case), so the (op == NFS_OP_LOOKUP)
	 * test below is always true — confirm whether SECINFO was meant to be
	 * skipped for LOOKUPP.
	 */
	uint32_t op = NFS_OP_LOOKUP;
	u_int64_t xid;
	struct nfsmount *nmp;
	struct nfsm_chain nmrep;

	nmp = NFSTONMP(dnp);
	if (nmp == NULL) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) {
		isdotdot = 1;
	}

	nfsm_chain_null(&nmrep);

	error = nfs_request_async_finish(req, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if (xidp) {
		*xidp = xid;
	}
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);

	nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP));
	/* stop here if the caller doesn't want the result FH/attributes */
	nfsmout_if(error || !fhp || !nvap);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
	nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
	/* reject file handles too large for the fixed-size fh_data buffer */
	if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
		/* set this up to look like a referral trigger */
		nfs4_default_attrs_for_referral_trigger(dnp, name, namelen, nvap, fhp);
		error = 0;
	} else {
		nfsmout_if(error);
		error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	nfsm_chain_cleanup(&nmrep);
	if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) {
		/* We still need to get SECINFO to set default for mount. */
		/* Do so for the first LOOKUP that returns successfully. */
		struct nfs_sec sec;

		sec.count = NX_MAX_SEC_FLAVORS;
		error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count);
		/* [sigh] some implementations return "illegal" error for unsupported ops */
		if (error == NFSERR_OP_ILLEGAL) {
			error = 0;
		}
		if (!error) {
			/* set our default security flavor to the first in the list */
			lck_mtx_lock(&nmp->nm_lock);
			if (sec.count) {
				nmp->nm_auth = sec.flavors[0];
			}
			nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	return error;
}
1323
/*
 * NFSv4 COMMIT RPC: ask the server to commit previously written (unstable)
 * data in the range [offset, offset+count) to stable storage.
 *
 * The server's returned write verifier is compared against "wverf" (the
 * verifier in effect when the writes were issued); a mismatch means the
 * server may have lost those writes, so NFSERR_STALEWRITEVERF is returned
 * to make the caller redo them.
 *
 * Returns 0 immediately if the mount has no write verifier yet (nothing
 * has been written unstably).
 */
int
nfs4_commit_rpc(
	nfsnode_t np,
	uint64_t offset,
	uint64_t count,
	kauth_cred_t cred,
	uint64_t wverf)
{
	struct nfsmount *nmp;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid, newwverf;
	uint32_t count32;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	/* no write verifier yet => no unstable writes to commit */
	if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) {
		return 0;
	}
	nfsvers = nmp->nm_vers;

	/*
	 * COMMIT carries a 32-bit count; a count of 0 asks the server to
	 * commit everything from offset through EOF, so use that when the
	 * requested byte count won't fit.
	 */
	if (count > UINT32_MAX) {
		count32 = 0;
	} else {
		count32 = count;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, COMMIT, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 19 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "commit", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_COMMIT);
	nfsm_chain_add_64(error, &nmreq, offset);
	nfsm_chain_add_32(error, &nmreq, count32);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    current_thread(), cred, &si, 0, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_COMMIT);
	nfsm_chain_get_64(error, &nmrep, newwverf);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsmout_if(error);
	/*
	 * Remember the server's latest verifier; a change relative to the
	 * verifier the writes were issued under means they may be lost.
	 */
	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_verf != newwverf) {
		nmp->nm_verf = newwverf;
	}
	if (wverf != newwverf) {
		error = NFSERR_STALEWRITEVERF;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1409
/*
 * NFSv4 "pathconf" RPC: fetch pathconf-related file system attributes for
 * this node (maxlink, maxname, no_trunc, chown_restricted, case_insensitive,
 * case_preserving) via a PUTFH+GETATTR compound.  The fs attributes are
 * returned in *nfsap; any regular node attributes that come back with them
 * are loaded into the node's attribute cache.
 */
int
nfs4_pathconf_rpc(
	nfsnode_t np,
	struct nfs_fsattr *nfsap,
	vfs_context_t ctx)
{
	u_int64_t xid;
	int error = 0, lockerror, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	struct nfsmount *nmp = NFSTONMP(np);
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* NFSv4: fetch "pathconf" info for this node */
	// PUTFH, GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "pathconf", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* ask for the usual attributes plus the pathconf-specific ones */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXLINK);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_MAXNAME);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_NO_TRUNC);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CHOWN_RESTRICTED);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_INSENSITIVE);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_CASE_PRESERVING);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL);
	nfsmout_if(error);
	/* cache the node attributes that came back alongside the fs attributes */
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
1482
/*
 * NFSv4 getattr vnode op.
 *
 * Fetches attributes for the vnode via nfs_getattr() (served from the
 * cache when fresh) and translates the NFS attribute set (nva) into the
 * VFS vnode_attr structure (*a_vap), returning only attributes the server
 * actually supplied (as recorded in nva_bitmap).  The ACL is requested and
 * returned only when the caller asked for it AND the server advertises
 * ACL support.
 */
int
nfs4_vnop_getattr(
	struct vnop_getattr_args /* {
	 * struct vnodeop_desc *a_desc;
	 * vnode_t a_vp;
	 * struct vnode_attr *a_vap;
	 * vfs_context_t a_context;
	 * } */*ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct nfsmount *nmp;
	struct nfs_vattr nva;
	int error, acls, ngaflags;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* does the server advertise ACL support for this file system? */
	acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL);

	ngaflags = NGA_CACHED;
	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		ngaflags |= NGA_ACL;
	}
	error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags);
	if (error) {
		return error;
	}

	/* copy what we have in nva to *a_vap */
	if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) {
		dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2);
		VATTR_RETURN(vap, va_rdev, rdev);
	}
	if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) {
		VATTR_RETURN(vap, va_nlink, nva.nva_nlink);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) {
		VATTR_RETURN(vap, va_data_size, nva.nva_size);
	}
	// VATTR_RETURN(vap, va_data_alloc, ???);
	// VATTR_RETURN(vap, va_total_size, ???);
	if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) {
		VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uid, nva.nva_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) {
		VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_gid, nva.nva_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) {
		VATTR_RETURN(vap, va_guuid, nva.nva_guuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		/* ACL-only mounts, or servers without a mode, report wide-open 0777 */
		if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) {
			VATTR_RETURN(vap, va_mode, 0777);
		} else {
			VATTR_RETURN(vap, va_mode, nva.nva_mode);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_flags) &&
	    (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) ||
	    NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) ||
	    (nva.nva_flags & NFS_FFLAG_TRIGGER))) {
		/* map NFS archive/hidden attributes onto BSD file flags */
		uint32_t flags = 0;
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    (nva.nva_flags & NFS_FFLAG_ARCHIVED)) {
			flags |= SF_ARCHIVED;
		}
		if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) &&
		    (nva.nva_flags & NFS_FFLAG_HIDDEN)) {
			flags |= UF_HIDDEN;
		}
		VATTR_RETURN(vap, va_flags, flags);
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) {
		vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE];
		vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE];
		VATTR_SET_SUPPORTED(vap, va_create_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) {
		vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS];
		vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS];
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) {
		vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY];
		vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY];
		VATTR_SET_SUPPORTED(vap, va_modify_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) {
		vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE];
		vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE];
		VATTR_SET_SUPPORTED(vap, va_change_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) {
		vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP];
		vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP];
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) {
		VATTR_RETURN(vap, va_fileid, nva.nva_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) {
		VATTR_RETURN(vap, va_type, nva.nva_type);
	}
	if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) {
		VATTR_RETURN(vap, va_filerev, nva.nva_change);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl) && acls) {
		/* hand the ACL off to the caller; clear our pointer so NVATTR_CLEANUP won't free it */
		VATTR_RETURN(vap, va_acl, nva.nva_acl);
		nva.nva_acl = NULL;
	}

	// other attrs we might support someday:
	// VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */);

	NVATTR_CLEANUP(&nva);
	return error;
}
1608
/*
 * NFSv4 SETATTR RPC.
 *
 * Sends a PUTFH+SETATTR+GETATTR compound for the attributes active in
 * *vap.  A real stateid is needed only when changing the file size
 * (truncation); otherwise the special all-zero stateid is used.  After
 * the RPC, the attributes the server reports as actually set are marked
 * supported in *vap, fresh attributes are loaded, and the attribute
 * cache is invalidated when that can't be done reliably.
 *
 * Server workaround: if the server rejects an ACL+mode combination with
 * EINVAL, the request is retried once with just the ACL (mode cleared).
 */
int
nfs4_setattr_rpc(
	nfsnode_t np,
	struct vnode_attr *vap,
	vfs_context_t ctx)
{
	struct nfsmount *nmp = NFSTONMP(np);
	int error = 0, setattr_error = 0, lockerror = ENOENT, status, nfsvers, numops;
	u_int64_t xid, nextxid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t getbitmap[NFS_ATTR_BITMAP_LEN];
	uint32_t setbitmap[NFS_ATTR_BITMAP_LEN];
	nfs_stateid stateid;
	struct nfsreq_secinfo_args si;

	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) {
		/* we don't support setting unsupported flags (duh!) */
		if (vap->va_active & ~VNODE_ATTR_va_flags) {
			return EINVAL; /* return EINVAL if other attributes also set */
		} else {
			return ENOTSUP; /* return ENOTSUP for chflags(2) */
		}
	}

	/* don't bother requesting some changes if they don't look like they are changing */
	if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uid);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) {
		VATTR_CLEAR_ACTIVE(vap, va_gid);
	}
	if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuuid)) {
		VATTR_CLEAR_ACTIVE(vap, va_guuid);
	}

tryagain:
	/* do nothing if no attributes will be sent */
	nfs_vattr_set_bitmap(nmp, bitmap, vap);
	if (!bitmap[0] && !bitmap[1]) {
		return 0;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Prepare GETATTR bitmap: if we are setting the ACL or mode, we
	 * need to invalidate any cached ACL. And if we had an ACL cached,
	 * we might as well also fetch the new value.
	 */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap);
	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) ||
	    NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) {
		if (NACLVALID(np)) {
			NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL);
		}
		NACLINVALIDATE(np);
	}

	// PUTFH, SETATTR, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 40 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "setattr", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR);
	/* truncation needs a real open/lock stateid; other changes use the zero stateid */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid);
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
	}
	nfsm_chain_add_stateid(error, &nmreq, &stateid);
	nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, getbitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SETATTR);
	nfsmout_if(error == EBADRPC);
	/* stash the SETATTR result; keep parsing the returned "attrs set" bitmap */
	setattr_error = error;
	error = 0;
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen);
	if (!error) {
		if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			microuptime(&np->n_lastio);
		}
		nfs_vattr_set_supported(setbitmap, vap);
		error = setattr_error;
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
	/*
	 * We just changed the attributes and we want to make sure that we
	 * see the latest attributes. Get the next XID. If it's not the
	 * next XID after the SETATTR XID, then it's possible that another
	 * RPC was in flight at the same time and it might put stale attributes
	 * in the cache. In that case, we invalidate the attributes and set
	 * the attribute cache XID to guarantee that newer attributes will
	 * get loaded next.
	 */
	nextxid = 0;
	nfs_get_xid(&nextxid);
	if (nextxid != (xid + 1)) {
		np->n_xid = nextxid;
		NATTRINVALIDATE(np);
	}
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) {
		/*
		 * Some server's may not like ACL/mode combos that get sent.
		 * If it looks like that's what the server choked on, try setting
		 * just the ACL and not the mode (unless it looks like everything
		 * but mode was already successfully set).
		 *
		 * NOTE(review): NFS_FATTR_MODE is an attribute bit *number*, but
		 * below it is OR'd into setbitmap[1] as if it were a mask —
		 * looks like it should be a NFS_BITMAP-style mask for the mode
		 * bit in word 1; confirm against the attribute bitmap macros.
		 */
		if (((bitmap[0] & setbitmap[0]) != bitmap[0]) ||
		    ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) {
			VATTR_CLEAR_ACTIVE(vap, va_mode);
			error = 0;
			goto tryagain;
		}
	}
	return error;
}
1767 #endif /* CONFIG_NFS4 */
1768
1769 /*
1770 * Wait for any pending recovery to complete.
1771 */
1772 int
1773 nfs_mount_state_wait_for_recovery(struct nfsmount *nmp)
1774 {
1775 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1776 int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
1777
1778 lck_mtx_lock(&nmp->nm_lock);
1779 while (nmp->nm_state & NFSSTA_RECOVER) {
1780 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) {
1781 break;
1782 }
1783 nfs_mount_sock_thread_wake(nmp);
1784 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1785 slpflag = 0;
1786 }
1787 lck_mtx_unlock(&nmp->nm_lock);
1788
1789 return error;
1790 }
1791
1792 /*
1793 * We're about to use/manipulate NFS mount's open/lock state.
1794 * Wait for any pending state recovery to complete, then
1795 * mark the state as being in use (which will hold off
1796 * the recovery thread until we're done).
1797 */
1798 int
1799 nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd)
1800 {
1801 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
1802 int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1803
1804 if (nfs_mount_gone(nmp)) {
1805 return ENXIO;
1806 }
1807 lck_mtx_lock(&nmp->nm_lock);
1808 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
1809 lck_mtx_unlock(&nmp->nm_lock);
1810 return ENXIO;
1811 }
1812 while (nmp->nm_state & NFSSTA_RECOVER) {
1813 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1814 break;
1815 }
1816 nfs_mount_sock_thread_wake(nmp);
1817 msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts);
1818 slpflag = 0;
1819 }
1820 if (!error) {
1821 nmp->nm_stateinuse++;
1822 }
1823 lck_mtx_unlock(&nmp->nm_lock);
1824
1825 return error;
1826 }
1827
1828 /*
1829 * We're done using/manipulating the NFS mount's open/lock
1830 * state. If the given error indicates that recovery should
1831 * be performed, we'll initiate recovery.
1832 */
1833 int
1834 nfs_mount_state_in_use_end(struct nfsmount *nmp, int error)
1835 {
1836 int restart = nfs_mount_state_error_should_restart(error);
1837
1838 if (nfs_mount_gone(nmp)) {
1839 return restart;
1840 }
1841 lck_mtx_lock(&nmp->nm_lock);
1842 if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) {
1843 printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n",
1844 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid);
1845 nfs_need_recover(nmp, error);
1846 }
1847 if (nmp->nm_stateinuse > 0) {
1848 nmp->nm_stateinuse--;
1849 } else {
1850 panic("NFS mount state in use count underrun");
1851 }
1852 if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) {
1853 wakeup(&nmp->nm_stateinuse);
1854 }
1855 lck_mtx_unlock(&nmp->nm_lock);
1856 if (error == NFSERR_GRACE) {
1857 tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz);
1858 }
1859
1860 return restart;
1861 }
1862
1863 /*
1864 * Does the error mean we should restart/redo a state-related operation?
1865 */
1866 int
1867 nfs_mount_state_error_should_restart(int error)
1868 {
1869 switch (error) {
1870 case NFSERR_STALE_STATEID:
1871 case NFSERR_STALE_CLIENTID:
1872 case NFSERR_ADMIN_REVOKED:
1873 case NFSERR_EXPIRED:
1874 case NFSERR_OLD_STATEID:
1875 case NFSERR_BAD_STATEID:
1876 case NFSERR_GRACE:
1877 return 1;
1878 }
1879 return 0;
1880 }
1881
1882 /*
1883 * In some cases we may want to limit how many times we restart a
1884 * state-related operation - e.g. we're repeatedly getting NFSERR_GRACE.
1885 * Base the limit on the lease (as long as it's not too short).
1886 */
1887 uint
1888 nfs_mount_state_max_restarts(struct nfsmount *nmp)
1889 {
1890 return MAX(nmp->nm_fsattr.nfsa_lease, 60);
1891 }
1892
1893 /*
1894 * Does the error mean we probably lost a delegation?
1895 */
1896 int
1897 nfs_mount_state_error_delegation_lost(int error)
1898 {
1899 switch (error) {
1900 case NFSERR_STALE_STATEID:
1901 case NFSERR_ADMIN_REVOKED:
1902 case NFSERR_EXPIRED:
1903 case NFSERR_OLD_STATEID:
1904 case NFSERR_BAD_STATEID:
1905 case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */
1906 return 1;
1907 }
1908 return 0;
1909 }
1910
1911
1912 /*
1913 * Mark an NFS node's open state as busy.
1914 */
1915 int
1916 nfs_open_state_set_busy(nfsnode_t np, thread_t thd)
1917 {
1918 struct nfsmount *nmp;
1919 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1920 int error = 0, slpflag;
1921
1922 nmp = NFSTONMP(np);
1923 if (nfs_mount_gone(nmp)) {
1924 return ENXIO;
1925 }
1926 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
1927
1928 lck_mtx_lock(&np->n_openlock);
1929 while (np->n_openflags & N_OPENBUSY) {
1930 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1931 break;
1932 }
1933 np->n_openflags |= N_OPENWANT;
1934 msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts);
1935 slpflag = 0;
1936 }
1937 if (!error) {
1938 np->n_openflags |= N_OPENBUSY;
1939 }
1940 lck_mtx_unlock(&np->n_openlock);
1941
1942 return error;
1943 }
1944
1945 /*
1946 * Clear an NFS node's open state busy flag and wake up
1947 * anyone wanting it.
1948 */
1949 void
1950 nfs_open_state_clear_busy(nfsnode_t np)
1951 {
1952 int wanted;
1953
1954 lck_mtx_lock(&np->n_openlock);
1955 if (!(np->n_openflags & N_OPENBUSY)) {
1956 panic("nfs_open_state_clear_busy");
1957 }
1958 wanted = (np->n_openflags & N_OPENWANT);
1959 np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT);
1960 lck_mtx_unlock(&np->n_openlock);
1961 if (wanted) {
1962 wakeup(&np->n_openflags);
1963 }
1964 }
1965
/*
 * Search a mount's open owner list for the owner for this credential.
 * If not found and "alloc" is set, then allocate a new one.
 *
 * Returns a referenced open owner (caller must nfs_open_owner_rele()),
 * or NULL if not found (or allocation failed).
 */
struct nfs_open_owner *
nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc)
{
	uid_t uid = kauth_cred_getuid(cred);
	struct nfs_open_owner *noop, *newnoop = NULL;

tryagain:
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) {
		/* open owners are keyed by the credential's uid */
		if (kauth_cred_getuid(noop->noo_cred) == uid) {
			break;
		}
	}

	if (!noop && !newnoop && alloc) {
		/* Not found: drop nm_lock to allocate, then retry the search. */
		lck_mtx_unlock(&nmp->nm_lock);
		MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK);
		if (!newnoop) {
			return NULL;
		}
		bzero(newnoop, sizeof(*newnoop));
		lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnoop->noo_mount = nmp;
		kauth_cred_ref(cred);
		newnoop->noo_cred = cred;
		newnoop->noo_name = OSAddAtomic(1, &nfs_open_owner_seqnum);
		TAILQ_INIT(&newnoop->noo_opens);
		goto tryagain;
	}
	if (!noop && newnoop) {
		/* Still not found on retry: link our new owner into the mount's list. */
		newnoop->noo_flags |= NFS_OPEN_OWNER_LINK;
		os_ref_init(&newnoop->noo_refcnt, NULL);
		TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link);
		noop = newnoop;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* Lost the race: another thread added an owner while we allocated. */
	if (newnoop && (noop != newnoop)) {
		nfs_open_owner_destroy(newnoop);
	}

	if (noop) {
		nfs_open_owner_ref(noop);
	}

	return noop;
}
2017
2018 /*
2019 * destroy an open owner that's no longer needed
2020 */
2021 void
2022 nfs_open_owner_destroy(struct nfs_open_owner *noop)
2023 {
2024 if (noop->noo_cred) {
2025 kauth_cred_unref(&noop->noo_cred);
2026 }
2027 lck_mtx_destroy(&noop->noo_lock, nfs_open_grp);
2028 FREE(noop, M_TEMP);
2029 }
2030
/*
 * acquire a reference count on an open owner
 */
void
nfs_open_owner_ref(struct nfs_open_owner *noop)
{
	/* noo_refcnt is protected by the owner's own lock */
	lck_mtx_lock(&noop->noo_lock);
	os_ref_retain_locked(&noop->noo_refcnt);
	lck_mtx_unlock(&noop->noo_lock);
}
2041
/*
 * drop a reference count on an open owner and destroy it if
 * it is no longer referenced and no longer on the mount's list.
 */
void
nfs_open_owner_rele(struct nfs_open_owner *noop)
{
	os_ref_count_t newcount;

	lck_mtx_lock(&noop->noo_lock);
	if (os_ref_get_count(&noop->noo_refcnt) < 1) {
		/* releasing with no references outstanding is a refcount bug */
		panic("nfs_open_owner_rele: no refcnt");
	}
	newcount = os_ref_release_locked(&noop->noo_refcnt);
	if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
		/* last reference should never go away while the owner is busy */
		panic("nfs_open_owner_rele: busy");
	}
	/* XXX we may potentially want to clean up idle/unused open owner structures */
	if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) {
		/* still referenced, or still reachable via the mount's list */
		lck_mtx_unlock(&noop->noo_lock);
		return;
	}
	/* owner is no longer referenced or linked to mount, so destroy it */
	lck_mtx_unlock(&noop->noo_lock);
	nfs_open_owner_destroy(noop);
}
2068
2069 /*
2070 * Mark an open owner as busy because we are about to
2071 * start an operation that uses and updates open owner state.
2072 */
2073 int
2074 nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd)
2075 {
2076 struct nfsmount *nmp;
2077 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2078 int error = 0, slpflag;
2079
2080 nmp = noop->noo_mount;
2081 if (nfs_mount_gone(nmp)) {
2082 return ENXIO;
2083 }
2084 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2085
2086 lck_mtx_lock(&noop->noo_lock);
2087 while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) {
2088 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2089 break;
2090 }
2091 noop->noo_flags |= NFS_OPEN_OWNER_WANT;
2092 msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts);
2093 slpflag = 0;
2094 }
2095 if (!error) {
2096 noop->noo_flags |= NFS_OPEN_OWNER_BUSY;
2097 }
2098 lck_mtx_unlock(&noop->noo_lock);
2099
2100 return error;
2101 }
2102
2103 /*
2104 * Clear the busy flag on an open owner and wake up anyone waiting
2105 * to mark it busy.
2106 */
2107 void
2108 nfs_open_owner_clear_busy(struct nfs_open_owner *noop)
2109 {
2110 int wanted;
2111
2112 lck_mtx_lock(&noop->noo_lock);
2113 if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) {
2114 panic("nfs_open_owner_clear_busy");
2115 }
2116 wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT);
2117 noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT);
2118 lck_mtx_unlock(&noop->noo_lock);
2119 if (wanted) {
2120 wakeup(noop);
2121 }
2122 }
2123
2124 /*
2125 * Given an open/lock owner and an error code, increment the
2126 * sequence ID if appropriate.
2127 */
2128 void
2129 nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nlop, int error)
2130 {
2131 switch (error) {
2132 case NFSERR_STALE_CLIENTID:
2133 case NFSERR_STALE_STATEID:
2134 case NFSERR_OLD_STATEID:
2135 case NFSERR_BAD_STATEID:
2136 case NFSERR_BAD_SEQID:
2137 case NFSERR_BADXDR:
2138 case NFSERR_RESOURCE:
2139 case NFSERR_NOFILEHANDLE:
2140 /* do not increment the open seqid on these errors */
2141 return;
2142 }
2143 if (noop) {
2144 noop->noo_seqid++;
2145 }
2146 if (nlop) {
2147 nlop->nlo_seqid++;
2148 }
2149 }
2150
/*
 * Search a node's open file list for any conflicts with this request.
 * Also find this open owner's open file structure.
 * If not found and "alloc" is set, then allocate one.
 *
 * Thin wrapper over nfs_open_file_find_internal() that passes no
 * provisional (nodeless) open file.
 */
int
nfs_open_file_find(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	*nofpp = NULL;
	return nfs_open_file_find_internal(np, noop, nofpp, accessMode, denyMode, alloc);
}
2168
/*
 * Internally, allow using a provisional nodeless nofp (passed in via *nofpp)
 * if an existing one is not found. This is used in "create" scenarios to
 * officially add the provisional nofp to the node once the node is created.
 *
 * Returns 0 with *nofpp set on success, EACCES on a local share-mode
 * conflict, ENOMEM on allocation failure, ESRCH if nothing was found
 * and nothing could be allocated.
 */
int
nfs_open_file_find_internal(
	nfsnode_t np,
	struct nfs_open_owner *noop,
	struct nfs_open_file **nofpp,
	uint32_t accessMode,
	uint32_t denyMode,
	int alloc)
{
	struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL;

	if (!np) {
		/* No node yet (create case): just allocate a provisional nofp. */
		goto alloc;
	}
tryagain:
	lck_mtx_lock(&np->n_openlock);
	TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) {
		if (nofp2->nof_owner == noop) {
			nofp = nofp2;
			/* with no modes to check we can stop at the match */
			if (!accessMode) {
				break;
			}
		}
		if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) {
			/* This request conflicts with an existing open on this client. */
			lck_mtx_unlock(&np->n_openlock);
			return EACCES;
		}
	}

	/*
	 * If this open owner doesn't have an open
	 * file structure yet, we create one for it.
	 */
	if (!nofp && !*nofpp && !newnofp && alloc) {
		lck_mtx_unlock(&np->n_openlock);
alloc:
		MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK);
		if (!newnofp) {
			return ENOMEM;
		}
		bzero(newnofp, sizeof(*newnofp));
		lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL);
		newnofp->nof_owner = noop;
		nfs_open_owner_ref(noop);
		newnofp->nof_np = np;
		lck_mtx_lock(&noop->noo_lock);
		TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink);
		lck_mtx_unlock(&noop->noo_lock);
		if (np) {
			/* Re-search: someone may have added one while we were unlocked. */
			goto tryagain;
		}
	}
	if (!nofp) {
		if (*nofpp) {
			/* Adopt the caller's provisional nofp for this node. */
			(*nofpp)->nof_np = np;
			nofp = *nofpp;
		} else {
			nofp = newnofp;
		}
		if (nofp && np) {
			TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link);
		}
	}
	if (np) {
		lck_mtx_unlock(&np->n_openlock);
	}

	/* Lost the race (or adopted the provisional one): discard our extra nofp. */
	if (alloc && newnofp && (nofp != newnofp)) {
		nfs_open_file_destroy(newnofp);
	}

	*nofpp = nofp;
	return nofp ? 0 : ESRCH;
}
2249
2250 /*
2251 * Destroy an open file structure.
2252 */
2253 void
2254 nfs_open_file_destroy(struct nfs_open_file *nofp)
2255 {
2256 lck_mtx_lock(&nofp->nof_owner->noo_lock);
2257 TAILQ_REMOVE(&nofp->nof_owner->noo_opens, nofp, nof_oolink);
2258 lck_mtx_unlock(&nofp->nof_owner->noo_lock);
2259 nfs_open_owner_rele(nofp->nof_owner);
2260 lck_mtx_destroy(&nofp->nof_lock, nfs_open_grp);
2261 FREE(nofp, M_TEMP);
2262 }
2263
2264 /*
2265 * Mark an open file as busy because we are about to
2266 * start an operation that uses and updates open file state.
2267 */
2268 int
2269 nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd)
2270 {
2271 struct nfsmount *nmp;
2272 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2273 int error = 0, slpflag;
2274
2275 nmp = nofp->nof_owner->noo_mount;
2276 if (nfs_mount_gone(nmp)) {
2277 return ENXIO;
2278 }
2279 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
2280
2281 lck_mtx_lock(&nofp->nof_lock);
2282 while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) {
2283 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
2284 break;
2285 }
2286 nofp->nof_flags |= NFS_OPEN_FILE_WANT;
2287 msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts);
2288 slpflag = 0;
2289 }
2290 if (!error) {
2291 nofp->nof_flags |= NFS_OPEN_FILE_BUSY;
2292 }
2293 lck_mtx_unlock(&nofp->nof_lock);
2294
2295 return error;
2296 }
2297
2298 /*
2299 * Clear the busy flag on an open file and wake up anyone waiting
2300 * to mark it busy.
2301 */
2302 void
2303 nfs_open_file_clear_busy(struct nfs_open_file *nofp)
2304 {
2305 int wanted;
2306
2307 lck_mtx_lock(&nofp->nof_lock);
2308 if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) {
2309 panic("nfs_open_file_clear_busy");
2310 }
2311 wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT);
2312 nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT);
2313 lck_mtx_unlock(&nofp->nof_lock);
2314 if (wanted) {
2315 wakeup(nofp);
2316 }
2317 }
2318
2319 /*
2320 * Add the open state for the given access/deny modes to this open file.
2321 */
2322 void
2323 nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, int delegated)
2324 {
2325 lck_mtx_lock(&nofp->nof_lock);
2326 nofp->nof_access |= accessMode;
2327 nofp->nof_deny |= denyMode;
2328
2329 if (delegated) {
2330 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2331 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2332 nofp->nof_d_r++;
2333 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2334 nofp->nof_d_w++;
2335 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2336 nofp->nof_d_rw++;
2337 }
2338 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2339 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2340 nofp->nof_d_r_dw++;
2341 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2342 nofp->nof_d_w_dw++;
2343 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2344 nofp->nof_d_rw_dw++;
2345 }
2346 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2347 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2348 nofp->nof_d_r_drw++;
2349 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2350 nofp->nof_d_w_drw++;
2351 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2352 nofp->nof_d_rw_drw++;
2353 }
2354 }
2355 } else {
2356 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2357 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2358 nofp->nof_r++;
2359 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2360 nofp->nof_w++;
2361 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2362 nofp->nof_rw++;
2363 }
2364 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2365 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2366 nofp->nof_r_dw++;
2367 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2368 nofp->nof_w_dw++;
2369 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2370 nofp->nof_rw_dw++;
2371 }
2372 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2373 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2374 nofp->nof_r_drw++;
2375 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2376 nofp->nof_w_drw++;
2377 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2378 nofp->nof_rw_drw++;
2379 }
2380 }
2381 }
2382
2383 nofp->nof_opencnt++;
2384 lck_mtx_unlock(&nofp->nof_lock);
2385 }
2386
2387 /*
2388 * Find which particular open combo will be closed and report what
2389 * the new modes will be and whether the open was delegated.
2390 */
2391 void
2392 nfs_open_file_remove_open_find(
2393 struct nfs_open_file *nofp,
2394 uint32_t accessMode,
2395 uint32_t denyMode,
2396 uint32_t *newAccessMode,
2397 uint32_t *newDenyMode,
2398 int *delegated)
2399 {
2400 /*
2401 * Calculate new modes: a mode bit gets removed when there's only
2402 * one count in all the corresponding counts
2403 */
2404 *newAccessMode = nofp->nof_access;
2405 *newDenyMode = nofp->nof_deny;
2406
2407 if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) &&
2408 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) &&
2409 ((nofp->nof_r + nofp->nof_d_r +
2410 nofp->nof_rw + nofp->nof_d_rw +
2411 nofp->nof_r_dw + nofp->nof_d_r_dw +
2412 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2413 nofp->nof_r_drw + nofp->nof_d_r_drw +
2414 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2415 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
2416 }
2417 if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2418 (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) &&
2419 ((nofp->nof_w + nofp->nof_d_w +
2420 nofp->nof_rw + nofp->nof_d_rw +
2421 nofp->nof_w_dw + nofp->nof_d_w_dw +
2422 nofp->nof_rw_dw + nofp->nof_d_rw_dw +
2423 nofp->nof_w_drw + nofp->nof_d_w_drw +
2424 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2425 *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE;
2426 }
2427 if ((denyMode & NFS_OPEN_SHARE_DENY_READ) &&
2428 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) &&
2429 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2430 nofp->nof_w_drw + nofp->nof_d_w_drw +
2431 nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) {
2432 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ;
2433 }
2434 if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) &&
2435 (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) &&
2436 ((nofp->nof_r_drw + nofp->nof_d_r_drw +
2437 nofp->nof_w_drw + nofp->nof_d_w_drw +
2438 nofp->nof_rw_drw + nofp->nof_d_rw_drw +
2439 nofp->nof_r_dw + nofp->nof_d_r_dw +
2440 nofp->nof_w_dw + nofp->nof_d_w_dw +
2441 nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) {
2442 *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE;
2443 }
2444
2445 /* Find the corresponding open access/deny mode counter. */
2446 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
2447 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2448 *delegated = (nofp->nof_d_r != 0);
2449 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2450 *delegated = (nofp->nof_d_w != 0);
2451 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2452 *delegated = (nofp->nof_d_rw != 0);
2453 } else {
2454 *delegated = 0;
2455 }
2456 } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
2457 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2458 *delegated = (nofp->nof_d_r_dw != 0);
2459 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2460 *delegated = (nofp->nof_d_w_dw != 0);
2461 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2462 *delegated = (nofp->nof_d_rw_dw != 0);
2463 } else {
2464 *delegated = 0;
2465 }
2466 } else { /* NFS_OPEN_SHARE_DENY_BOTH */
2467 if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
2468 *delegated = (nofp->nof_d_r_drw != 0);
2469 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
2470 *delegated = (nofp->nof_d_w_drw != 0);
2471 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
2472 *delegated = (nofp->nof_d_rw_drw != 0);
2473 } else {
2474 *delegated = 0;
2475 }
2476 }
2477 }
2478
/*
 * Remove the open state for the given access/deny modes to this open file.
 *
 * Works out (via nfs_open_file_remove_open_find()) whether the open being
 * removed was delegated and what the remaining access/deny modes will be,
 * then decrements the matching per-combination counter.  An underrun is
 * logged rather than wrapping the counter.
 */
void
nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode)
{
	uint32_t newAccessMode, newDenyMode;
	int delegated = 0;

	lck_mtx_lock(&nofp->nof_lock);
	nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated);

	/* Decrement the corresponding open access/deny mode counter. */
	if (denyMode == NFS_OPEN_SHARE_DENY_NONE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r--;
				}
			} else {
				if (nofp->nof_r == 0) {
					NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w--;
				}
			} else {
				if (nofp->nof_w == 0) {
					NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw--;
				}
			} else {
				if (nofp->nof_rw == 0) {
					NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw--;
				}
			}
		}
	} else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) {
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_dw--;
				}
			} else {
				if (nofp->nof_r_dw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_dw--;
				}
			} else {
				if (nofp->nof_w_dw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_dw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_dw--;
				}
			} else {
				if (nofp->nof_rw_dw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_dw--;
				}
			}
		}
	} else { /* NFS_OPEN_SHARE_DENY_BOTH */
		if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) {
			if (delegated) {
				if (nofp->nof_d_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_r_drw--;
				}
			} else {
				if (nofp->nof_r_drw == 0) {
					NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_r_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) {
			if (delegated) {
				if (nofp->nof_d_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_w_drw--;
				}
			} else {
				if (nofp->nof_w_drw == 0) {
					NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_w_drw--;
				}
			}
		} else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
			if (delegated) {
				if (nofp->nof_d_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_d_rw_drw--;
				}
			} else {
				if (nofp->nof_rw_drw == 0) {
					NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
				} else {
					nofp->nof_rw_drw--;
				}
			}
		}
	}

	/* update the modes */
	nofp->nof_access = newAccessMode;
	nofp->nof_deny = newDenyMode;
	nofp->nof_opencnt--;
	lck_mtx_unlock(&nofp->nof_lock);
}
2632
2633 #if CONFIG_NFS4
/*
 * Get the current (delegation, lock, open, default) stateid for this node.
 * If node has a delegation, use that stateid.
 * If pid has a lock, use the lockowner's stateid.
 * Or use the open file's stateid.
 * If no open file, use a default stateid of all ones.
 */
void
nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid)
{
	struct nfsmount *nmp = NFSTONMP(np);
	proc_t p = thd ? get_bsdthreadtask_info(thd) : current_proc(); // XXX async I/O requests don't have a thread
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	nfs_stateid *s = NULL;

	if (np->n_openflags & N_DELEG_MASK) {
		/* delegation stateid takes precedence over everything else */
		s = &np->n_dstateid;
	} else {
		if (p) {
			nlop = nfs_lock_owner_find(np, p, 0);
		}
		if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) {
			/* we hold locks, use lock stateid */
			s = &nlop->nlo_stateid;
		} else if (((noop = nfs_open_owner_find(nmp, cred, 0))) &&
		    (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) &&
		    !(nofp->nof_flags & NFS_OPEN_FILE_LOST) &&
		    nofp->nof_access) {
			/* we (should) have the file open, use open stateid */
			if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
				/* reopen may clear NFS_OPEN_FILE_LOST or mark the open lost */
				nfs4_reopen(nofp, thd);
			}
			if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
				s = &nofp->nof_stateid;
			}
		}
	}

	if (s) {
		sid->seqid = s->seqid;
		sid->other[0] = s->other[0];
		sid->other[1] = s->other[1];
		sid->other[2] = s->other[2];
	} else {
		/* named attributes may not have a stateid for reads, so don't complain for them */
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			NP(np, "nfs_get_stateid: no stateid");
		}
		/* fall back to the special all-ones stateid */
		sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff;
	}
	if (nlop) {
		nfs_lock_owner_rele(nlop);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
}
2693
2694
/*
 * When we have a delegation, we may be able to perform the OPEN locally.
 * Perform the OPEN by checking the delegation ACE and/or checking via ACCESS.
 *
 * Returns 0 and records the open via nfs_open_file_add_open() on success,
 * EACCES (or the ACCESS RPC error) if the open cannot be authorized.
 */
int
nfs4_open_delegated(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	uint32_t accessMode,
	uint32_t denyMode,
	vfs_context_t ctx)
{
	int error = 0, ismember, readtoo = 0, authorized = 0;
	uint32_t action;
	struct kauth_acl_eval eval;
	kauth_cred_t cred = vfs_context_ucred(ctx);

	if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
		/*
		 * Try to open it for read access too,
		 * so the buffer cache can read data.
		 */
		readtoo = 1;
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	}

tryagain:
	/* Translate the requested share access into kauth vnode actions. */
	action = 0;
	if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) {
		action |= KAUTH_VNODE_WRITE_DATA;
	}

	/* evaluate ACE (if we have one) */
	if (np->n_dace.ace_flags) {
		eval.ae_requested = action;
		eval.ae_acl = &np->n_dace;
		eval.ae_count = 1;
		eval.ae_options = 0;
		if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) {
			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
		}
		error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember);
		if (!error && ismember) {
			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
		}

		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

		error = kauth_acl_evaluate(cred, &eval);

		if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) {
			authorized = 1;
		}
	}

	if (!authorized) {
		/* need to ask the server via ACCESS */
		struct vnop_access_args naa;
		naa.a_desc = &vnop_access_desc;
		naa.a_vp = NFSTOV(np);
		naa.a_action = action;
		naa.a_context = ctx;
		if (!(error = nfs_vnop_access(&naa))) {
			authorized = 1;
		}
	}

	if (!authorized) {
		if (readtoo) {
			/* try again without the extra read access */
			accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
			readtoo = 0;
			goto tryagain;
		}
		return error ? error : EACCES;
	}

	/* authorized: record the (delegated) open locally */
	nfs_open_file_add_open(nofp, accessMode, denyMode, 1);

	return 0;
}
2782
2783
/*
 * Open a file with the given access/deny modes.
 *
 * If we have a delegation, we may be able to handle the open locally.
 * Otherwise, we will always send the open RPC even if this open's mode is
 * a subset of all the existing opens. This makes sure that we will always
 * be able to do a downgrade to any of the open modes.
 *
 * Note: local conflicts should have already been checked in nfs_open_file_find().
 */
int
nfs4_open(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	uint32_t accessMode,
	uint32_t denyMode,
	vfs_context_t ctx)
{
	vnode_t vp = NFSTOV(np);
	vnode_t dvp = NULL;
	struct componentname cn;
	const char *vname = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	int error = 0, readtoo = 0;

	/*
	 * We can handle the OPEN ourselves if we have a delegation,
	 * unless it's a read delegation and the open is asking for
	 * either write access or deny read. We also don't bother to
	 * use the delegation if it's being returned.
	 */
	if (np->n_openflags & N_DELEG_MASK) {
		if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
			return error;
		}
		/* re-check under the busy flag: the delegation may have changed */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) &&
		    (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ||
		    (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) {
			error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx);
			nfs_open_state_clear_busy(np);
			return error;
		}
		nfs_open_state_clear_busy(np);
	}

	/*
	 * [sigh] We can't trust VFS to get the parent right for named
	 * attribute nodes. (It likes to reparent the nodes after we've
	 * created them.) Luckily we can probably get the right parent
	 * from the n_parent we have stashed away.
	 */
	if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
	    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
		dvp = NULL;
	}
	if (!dvp) {
		dvp = vnode_getparent(vp);
	}
	vname = vnode_getname(vp);
	if (!dvp || !vname) {
		/* the OPEN RPC needs a parent and a name to send */
		if (!error) {
			error = EIO;
		}
		goto out;
	}
	/* copy the name, spilling to a heap buffer if it won't fit on the stack */
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", vname);
	if (namelen >= sizeof(smallname)) {
		MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", vname);
	}
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = filename;
	cn.cn_namelen = namelen;

	if (!(accessMode & NFS_OPEN_SHARE_ACCESS_READ)) {
		/*
		 * Try to open it for read access too,
		 * so the buffer cache can read data.
		 */
		readtoo = 1;
		accessMode |= NFS_OPEN_SHARE_ACCESS_READ;
	}
tryagain:
	error = nfs4_open_rpc(nofp, ctx, &cn, NULL, dvp, &vp, NFS_OPEN_NOCREATE, accessMode, denyMode);
	if (error) {
		if (!nfs_mount_state_error_should_restart(error) &&
		    (error != EINTR) && (error != ERESTART) && readtoo) {
			/* try again without the extra read access */
			accessMode &= ~NFS_OPEN_SHARE_ACCESS_READ;
			readtoo = 0;
			goto tryagain;
		}
		goto out;
	}
	/* server confirmed the open: record it (not delegated) */
	nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
out:
	if (filename && (filename != &smallname[0])) {
		FREE(filename, M_TEMP);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}
2898 #endif /* CONFIG_NFS4 */
2899
2900 int
2901 nfs_vnop_mmap(
2902 struct vnop_mmap_args /* {
2903 * struct vnodeop_desc *a_desc;
2904 * vnode_t a_vp;
2905 * int a_fflags;
2906 * vfs_context_t a_context;
2907 * } */*ap)
2908 {
2909 vfs_context_t ctx = ap->a_context;
2910 vnode_t vp = ap->a_vp;
2911 nfsnode_t np = VTONFS(vp);
2912 int error = 0, accessMode, denyMode, delegated;
2913 struct nfsmount *nmp;
2914 struct nfs_open_owner *noop = NULL;
2915 struct nfs_open_file *nofp = NULL;
2916
2917 nmp = VTONMP(vp);
2918 if (nfs_mount_gone(nmp)) {
2919 return ENXIO;
2920 }
2921
2922 if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) {
2923 return EINVAL;
2924 }
2925 if (np->n_flag & NREVOKE) {
2926 return EIO;
2927 }
2928
2929 /*
2930 * fflags contains some combination of: PROT_READ, PROT_WRITE
2931 * Since it's not possible to mmap() without having the file open for reading,
2932 * read access is always there (regardless if PROT_READ is not set).
2933 */
2934 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
2935 if (ap->a_fflags & PROT_WRITE) {
2936 accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE;
2937 }
2938 denyMode = NFS_OPEN_SHARE_DENY_NONE;
2939
2940 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
2941 if (!noop) {
2942 return ENOMEM;
2943 }
2944
2945 restart:
2946 error = nfs_mount_state_in_use_start(nmp, NULL);
2947 if (error) {
2948 nfs_open_owner_rele(noop);
2949 return error;
2950 }
2951 if (np->n_flag & NREVOKE) {
2952 error = EIO;
2953 nfs_mount_state_in_use_end(nmp, 0);
2954 nfs_open_owner_rele(noop);
2955 return error;
2956 }
2957
2958 error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
2959 if (error || (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST))) {
2960 NP(np, "nfs_vnop_mmap: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred));
2961 error = EPERM;
2962 }
2963 #if CONFIG_NFS4
2964 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
2965 nfs_mount_state_in_use_end(nmp, 0);
2966 error = nfs4_reopen(nofp, NULL);
2967 nofp = NULL;
2968 if (!error) {
2969 goto restart;
2970 }
2971 }
2972 #endif
2973 if (!error) {
2974 error = nfs_open_file_set_busy(nofp, NULL);
2975 }
2976 if (error) {
2977 nofp = NULL;
2978 goto out;
2979 }
2980
2981 /*
2982 * The open reference for mmap must mirror an existing open because
2983 * we may need to reclaim it after the file is closed.
2984 * So grab another open count matching the accessMode passed in.
2985 * If we already had an mmap open, prefer read/write without deny mode.
2986 * This means we may have to drop the current mmap open first.
2987 *
2988 * N.B. We should have an open for the mmap, because, mmap was
2989 * called on an open descriptor, or we've created an open for read
2990 * from reading the first page for execve. However, if we piggy
2991 * backed on an existing NFS_OPEN_SHARE_ACCESS_READ/NFS_OPEN_SHARE_DENY_NONE
2992 * that open may have closed.
2993 */
2994
2995 if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
2996 if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
2997 /* We shouldn't get here. We've already open the file for execve */
2998 NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld",
2999 nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
3000 }
3001 /*
3002 * mmapings for execve are just for read. Get out with EPERM if the accessMode is not ACCESS_READ
3003 * or the access would be denied. Other accesses should have an open descriptor for the mapping.
3004 */
3005 if (accessMode != NFS_OPEN_SHARE_ACCESS_READ || (accessMode & nofp->nof_deny)) {
3006 /* not asking for just read access -> fail */
3007 error = EPERM;
3008 goto out;
3009 }
3010 /* we don't have the file open, so open it for read access */
3011 if (nmp->nm_vers < NFS_VER4) {
3012 /* NFS v2/v3 opens are always allowed - so just add it. */
3013 nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
3014 error = 0;
3015 }
3016 #if CONFIG_NFS4
3017 else {
3018 error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
3019 }
3020 #endif
3021 if (!error) {
3022 nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
3023 }
3024 if (error) {
3025 goto out;
3026 }
3027 }
3028
3029 /* determine deny mode for open */
3030 if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) {
3031 if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3032 delegated = 1;
3033 if (nofp->nof_d_rw) {
3034 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3035 } else if (nofp->nof_d_rw_dw) {
3036 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3037 } else if (nofp->nof_d_rw_drw) {
3038 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3039 }
3040 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3041 delegated = 0;
3042 if (nofp->nof_rw) {
3043 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3044 } else if (nofp->nof_rw_dw) {
3045 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3046 } else if (nofp->nof_rw_drw) {
3047 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3048 }
3049 } else {
3050 error = EPERM;
3051 }
3052 } else { /* NFS_OPEN_SHARE_ACCESS_READ */
3053 if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) {
3054 delegated = 1;
3055 if (nofp->nof_d_r) {
3056 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3057 } else if (nofp->nof_d_r_dw) {
3058 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3059 } else if (nofp->nof_d_r_drw) {
3060 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3061 }
3062 } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) {
3063 delegated = 0;
3064 if (nofp->nof_r) {
3065 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3066 } else if (nofp->nof_r_dw) {
3067 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3068 } else if (nofp->nof_r_drw) {
3069 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3070 }
3071 } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) {
3072 /*
3073 * This clause and the one below is to co-opt a read write access
3074 * for a read only mmaping. We probably got here in that an
3075 * existing rw open for an executable file already exists.
3076 */
3077 delegated = 1;
3078 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3079 if (nofp->nof_d_rw) {
3080 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3081 } else if (nofp->nof_d_rw_dw) {
3082 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3083 } else if (nofp->nof_d_rw_drw) {
3084 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3085 }
3086 } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) {
3087 delegated = 0;
3088 accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
3089 if (nofp->nof_rw) {
3090 denyMode = NFS_OPEN_SHARE_DENY_NONE;
3091 } else if (nofp->nof_rw_dw) {
3092 denyMode = NFS_OPEN_SHARE_DENY_WRITE;
3093 } else if (nofp->nof_rw_drw) {
3094 denyMode = NFS_OPEN_SHARE_DENY_BOTH;
3095 }
3096 } else {
3097 error = EPERM;
3098 }
3099 }
3100 if (error) { /* mmap mode without proper open mode */
3101 goto out;
3102 }
3103
3104 /*
3105 * If the existing mmap access is more than the new access OR the
3106 * existing access is the same and the existing deny mode is less,
3107 * then we'll stick with the existing mmap open mode.
3108 */
3109 if ((nofp->nof_mmap_access > accessMode) ||
3110 ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) {
3111 goto out;
3112 }
3113
3114 /* update mmap open mode */
3115 if (nofp->nof_mmap_access) {
3116 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3117 if (error) {
3118 if (!nfs_mount_state_error_should_restart(error)) {
3119 NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3120 }
3121 NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3122 goto out;
3123 }
3124 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3125 }
3126
3127 nfs_open_file_add_open(nofp, accessMode, denyMode, delegated);
3128 nofp->nof_mmap_access = accessMode;
3129 nofp->nof_mmap_deny = denyMode;
3130
3131 out:
3132 if (nofp) {
3133 nfs_open_file_clear_busy(nofp);
3134 }
3135 if (nfs_mount_state_in_use_end(nmp, error)) {
3136 nofp = NULL;
3137 goto restart;
3138 }
3139 if (noop) {
3140 nfs_open_owner_rele(noop);
3141 }
3142
3143 if (!error) {
3144 int ismapped = 0;
3145 nfs_node_lock_force(np);
3146 if ((np->n_flag & NISMAPPED) == 0) {
3147 np->n_flag |= NISMAPPED;
3148 ismapped = 1;
3149 }
3150 nfs_node_unlock(np);
3151 if (ismapped) {
3152 lck_mtx_lock(&nmp->nm_lock);
3153 nmp->nm_state &= ~NFSSTA_SQUISHY;
3154 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
3155 if (nmp->nm_curdeadtimeout <= 0) {
3156 nmp->nm_deadto_start = 0;
3157 }
3158 nmp->nm_mappers++;
3159 lck_mtx_unlock(&nmp->nm_lock);
3160 }
3161 }
3162
3163 return error;
3164 }
3165
3166
3167 int
3168 nfs_vnop_mnomap(
3169 struct vnop_mnomap_args /* {
3170 * struct vnodeop_desc *a_desc;
3171 * vnode_t a_vp;
3172 * vfs_context_t a_context;
3173 * } */*ap)
3174 {
3175 vfs_context_t ctx = ap->a_context;
3176 vnode_t vp = ap->a_vp;
3177 nfsnode_t np = VTONFS(vp);
3178 struct nfsmount *nmp;
3179 struct nfs_open_file *nofp = NULL;
3180 off_t size;
3181 int error;
3182 int is_mapped_flag = 0;
3183
3184 nmp = VTONMP(vp);
3185 if (nfs_mount_gone(nmp)) {
3186 return ENXIO;
3187 }
3188
3189 nfs_node_lock_force(np);
3190 if (np->n_flag & NISMAPPED) {
3191 is_mapped_flag = 1;
3192 np->n_flag &= ~NISMAPPED;
3193 }
3194 nfs_node_unlock(np);
3195 if (is_mapped_flag) {
3196 lck_mtx_lock(&nmp->nm_lock);
3197 if (nmp->nm_mappers) {
3198 nmp->nm_mappers--;
3199 } else {
3200 NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped");
3201 }
3202 lck_mtx_unlock(&nmp->nm_lock);
3203 }
3204
3205 /* flush buffers/ubc before we drop the open (in case it's our last open) */
3206 nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);
3207 if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) {
3208 ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC);
3209 }
3210
3211 /* walk all open files and close all mmap opens */
3212 loop:
3213 error = nfs_mount_state_in_use_start(nmp, NULL);
3214 if (error) {
3215 return error;
3216 }
3217 lck_mtx_lock(&np->n_openlock);
3218 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
3219 if (!nofp->nof_mmap_access) {
3220 continue;
3221 }
3222 lck_mtx_unlock(&np->n_openlock);
3223 #if CONFIG_NFS4
3224 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
3225 nfs_mount_state_in_use_end(nmp, 0);
3226 error = nfs4_reopen(nofp, NULL);
3227 if (!error) {
3228 goto loop;
3229 }
3230 }
3231 #endif
3232 if (!error) {
3233 error = nfs_open_file_set_busy(nofp, NULL);
3234 }
3235 if (error) {
3236 lck_mtx_lock(&np->n_openlock);
3237 break;
3238 }
3239 if (nofp->nof_mmap_access) {
3240 error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx);
3241 if (!nfs_mount_state_error_should_restart(error)) {
3242 if (error) { /* not a state-operation-restarting error, so just clear the access */
3243 NP(np, "nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3244 }
3245 nofp->nof_mmap_access = nofp->nof_mmap_deny = 0;
3246 }
3247 if (error) {
3248 NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
3249 }
3250 }
3251 nfs_open_file_clear_busy(nofp);
3252 nfs_mount_state_in_use_end(nmp, error);
3253 goto loop;
3254 }
3255 lck_mtx_unlock(&np->n_openlock);
3256 nfs_mount_state_in_use_end(nmp, error);
3257 return error;
3258 }
3259
3260 /*
3261 * Search a node's lock owner list for the owner for this process.
3262 * If not found and "alloc" is set, then allocate a new one.
3263 */
3264 struct nfs_lock_owner *
3265 nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc)
3266 {
3267 pid_t pid = proc_pid(p);
3268 struct nfs_lock_owner *nlop, *newnlop = NULL;
3269
3270 tryagain:
3271 lck_mtx_lock(&np->n_openlock);
3272 TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) {
3273 os_ref_count_t newcount;
3274
3275 if (nlop->nlo_pid != pid) {
3276 continue;
3277 }
3278 if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) {
3279 break;
3280 }
3281 /* stale lock owner... reuse it if we can */
3282 if (os_ref_get_count(&nlop->nlo_refcnt)) {
3283 TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link);
3284 nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK;
3285 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3286 lck_mtx_unlock(&np->n_openlock);
3287 goto tryagain;
3288 }
3289 nlop->nlo_pid_start = p->p_start;
3290 nlop->nlo_seqid = 0;
3291 nlop->nlo_stategenid = 0;
3292 break;
3293 }
3294
3295 if (!nlop && !newnlop && alloc) {
3296 lck_mtx_unlock(&np->n_openlock);
3297 MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK);
3298 if (!newnlop) {
3299 return NULL;
3300 }
3301 bzero(newnlop, sizeof(*newnlop));
3302 lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL);
3303 newnlop->nlo_pid = pid;
3304 newnlop->nlo_pid_start = p->p_start;
3305 newnlop->nlo_name = OSAddAtomic(1, &nfs_lock_owner_seqnum);
3306 TAILQ_INIT(&newnlop->nlo_locks);
3307 goto tryagain;
3308 }
3309 if (!nlop && newnlop) {
3310 newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK;
3311 os_ref_init(&newnlop->nlo_refcnt, NULL);
3312 TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link);
3313 nlop = newnlop;
3314 }
3315 lck_mtx_unlock(&np->n_openlock);
3316
3317 if (newnlop && (nlop != newnlop)) {
3318 nfs_lock_owner_destroy(newnlop);
3319 }
3320
3321 if (nlop) {
3322 nfs_lock_owner_ref(nlop);
3323 }
3324
3325 return nlop;
3326 }
3327
3328 /*
3329 * destroy a lock owner that's no longer needed
3330 */
3331 void
3332 nfs_lock_owner_destroy(struct nfs_lock_owner *nlop)
3333 {
3334 if (nlop->nlo_open_owner) {
3335 nfs_open_owner_rele(nlop->nlo_open_owner);
3336 nlop->nlo_open_owner = NULL;
3337 }
3338 lck_mtx_destroy(&nlop->nlo_lock, nfs_open_grp);
3339 FREE(nlop, M_TEMP);
3340 }
3341
3342 /*
3343 * acquire a reference count on a lock owner
3344 */
3345 void
3346 nfs_lock_owner_ref(struct nfs_lock_owner *nlop)
3347 {
3348 lck_mtx_lock(&nlop->nlo_lock);
3349 os_ref_retain_locked(&nlop->nlo_refcnt);
3350 lck_mtx_unlock(&nlop->nlo_lock);
3351 }
3352
3353 /*
3354 * drop a reference count on a lock owner and destroy it if
3355 * it is no longer referenced and no longer on the mount's list.
3356 */
3357 void
3358 nfs_lock_owner_rele(struct nfs_lock_owner *nlop)
3359 {
3360 os_ref_count_t newcount;
3361
3362 lck_mtx_lock(&nlop->nlo_lock);
3363 if (os_ref_get_count(&nlop->nlo_refcnt) < 1) {
3364 panic("nfs_lock_owner_rele: no refcnt");
3365 }
3366 newcount = os_ref_release_locked(&nlop->nlo_refcnt);
3367 if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3368 panic("nfs_lock_owner_rele: busy");
3369 }
3370 /* XXX we may potentially want to clean up idle/unused lock owner structures */
3371 if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) {
3372 lck_mtx_unlock(&nlop->nlo_lock);
3373 return;
3374 }
3375 /* owner is no longer referenced or linked to mount, so destroy it */
3376 lck_mtx_unlock(&nlop->nlo_lock);
3377 nfs_lock_owner_destroy(nlop);
3378 }
3379
3380 /*
3381 * Mark a lock owner as busy because we are about to
3382 * start an operation that uses and updates lock owner state.
3383 */
3384 int
3385 nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd)
3386 {
3387 struct nfsmount *nmp;
3388 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3389 int error = 0, slpflag;
3390
3391 nmp = nlop->nlo_open_owner->noo_mount;
3392 if (nfs_mount_gone(nmp)) {
3393 return ENXIO;
3394 }
3395 slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0;
3396
3397 lck_mtx_lock(&nlop->nlo_lock);
3398 while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) {
3399 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
3400 break;
3401 }
3402 nlop->nlo_flags |= NFS_LOCK_OWNER_WANT;
3403 msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts);
3404 slpflag = 0;
3405 }
3406 if (!error) {
3407 nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY;
3408 }
3409 lck_mtx_unlock(&nlop->nlo_lock);
3410
3411 return error;
3412 }
3413
3414 /*
3415 * Clear the busy flag on a lock owner and wake up anyone waiting
3416 * to mark it busy.
3417 */
3418 void
3419 nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop)
3420 {
3421 int wanted;
3422
3423 lck_mtx_lock(&nlop->nlo_lock);
3424 if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) {
3425 panic("nfs_lock_owner_clear_busy");
3426 }
3427 wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT);
3428 nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT);
3429 lck_mtx_unlock(&nlop->nlo_lock);
3430 if (wanted) {
3431 wakeup(nlop);
3432 }
3433 }
3434
3435 /*
3436 * Insert a held lock into a lock owner's sorted list.
3437 * (flock locks are always inserted at the head the list)
3438 */
3439 void
3440 nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_lock *newnflp)
3441 {
3442 struct nfs_file_lock *nflp;
3443
3444 /* insert new lock in lock owner's held lock list */
3445 lck_mtx_lock(&nlop->nlo_lock);
3446 if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) {
3447 TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink);
3448 } else {
3449 TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) {
3450 if (newnflp->nfl_start < nflp->nfl_start) {
3451 break;
3452 }
3453 }
3454 if (nflp) {
3455 TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink);
3456 } else {
3457 TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink);
3458 }
3459 }
3460 lck_mtx_unlock(&nlop->nlo_lock);
3461 }
3462
3463 /*
3464 * Get a file lock structure for this lock owner.
3465 */
3466 struct nfs_file_lock *
3467 nfs_file_lock_alloc(struct nfs_lock_owner *nlop)
3468 {
3469 struct nfs_file_lock *nflp = NULL;
3470
3471 lck_mtx_lock(&nlop->nlo_lock);
3472 if (!nlop->nlo_alock.nfl_owner) {
3473 nflp = &nlop->nlo_alock;
3474 nflp->nfl_owner = nlop;
3475 }
3476 lck_mtx_unlock(&nlop->nlo_lock);
3477 if (!nflp) {
3478 MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK);
3479 if (!nflp) {
3480 return NULL;
3481 }
3482 bzero(nflp, sizeof(*nflp));
3483 nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC;
3484 nflp->nfl_owner = nlop;
3485 }
3486 nfs_lock_owner_ref(nlop);
3487 return nflp;
3488 }
3489
3490 /*
3491 * destroy the given NFS file lock structure
3492 */
3493 void
3494 nfs_file_lock_destroy(struct nfs_file_lock *nflp)
3495 {
3496 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3497
3498 if (nflp->nfl_flags & NFS_FILE_LOCK_ALLOC) {
3499 nflp->nfl_owner = NULL;
3500 FREE(nflp, M_TEMP);
3501 } else {
3502 lck_mtx_lock(&nlop->nlo_lock);
3503 bzero(nflp, sizeof(*nflp));
3504 lck_mtx_unlock(&nlop->nlo_lock);
3505 }
3506 nfs_lock_owner_rele(nlop);
3507 }
3508
3509 /*
3510 * Check if one file lock conflicts with another.
3511 * (nflp1 is the new lock. nflp2 is the existing lock.)
3512 */
3513 int
3514 nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit)
3515 {
3516 /* no conflict if lock is dead */
3517 if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) {
3518 return 0;
3519 }
3520 /* no conflict if it's ours - unless the lock style doesn't match */
3521 if ((nflp1->nfl_owner == nflp2->nfl_owner) &&
3522 ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) {
3523 if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) &&
3524 (nflp1->nfl_start > nflp2->nfl_start) &&
3525 (nflp1->nfl_end < nflp2->nfl_end)) {
3526 *willsplit = 1;
3527 }
3528 return 0;
3529 }
3530 /* no conflict if ranges don't overlap */
3531 if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) {
3532 return 0;
3533 }
3534 /* no conflict if neither lock is exclusive */
3535 if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) {
3536 return 0;
3537 }
3538 /* conflict */
3539 return 1;
3540 }
3541
3542 #if CONFIG_NFS4
3543 /*
3544 * Send an NFSv4 LOCK RPC to the server.
3545 */
3546 int
3547 nfs4_setlock_rpc(
3548 nfsnode_t np,
3549 struct nfs_open_file *nofp,
3550 struct nfs_file_lock *nflp,
3551 int reclaim,
3552 int flags,
3553 thread_t thd,
3554 kauth_cred_t cred)
3555 {
3556 struct nfs_lock_owner *nlop = nflp->nfl_owner;
3557 struct nfsmount *nmp;
3558 struct nfsm_chain nmreq, nmrep;
3559 uint64_t xid;
3560 uint32_t locktype;
3561 int error = 0, lockerror = ENOENT, newlocker, numops, status;
3562 struct nfsreq_secinfo_args si;
3563
3564 nmp = NFSTONMP(np);
3565 if (nfs_mount_gone(nmp)) {
3566 return ENXIO;
3567 }
3568 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3569 return EINVAL;
3570 }
3571
3572 newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid);
3573 locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ?
3574 ((nflp->nfl_type == F_WRLCK) ?
3575 NFS_LOCK_TYPE_WRITEW :
3576 NFS_LOCK_TYPE_READW) :
3577 ((nflp->nfl_type == F_WRLCK) ?
3578 NFS_LOCK_TYPE_WRITE :
3579 NFS_LOCK_TYPE_READ);
3580 if (newlocker) {
3581 error = nfs_open_file_set_busy(nofp, thd);
3582 if (error) {
3583 return error;
3584 }
3585 error = nfs_open_owner_set_busy(nofp->nof_owner, thd);
3586 if (error) {
3587 nfs_open_file_clear_busy(nofp);
3588 return error;
3589 }
3590 if (!nlop->nlo_open_owner) {
3591 nfs_open_owner_ref(nofp->nof_owner);
3592 nlop->nlo_open_owner = nofp->nof_owner;
3593 }
3594 }
3595 error = nfs_lock_owner_set_busy(nlop, thd);
3596 if (error) {
3597 if (newlocker) {
3598 nfs_open_owner_clear_busy(nofp->nof_owner);
3599 nfs_open_file_clear_busy(nofp);
3600 }
3601 return error;
3602 }
3603
3604 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3605 nfsm_chain_null(&nmreq);
3606 nfsm_chain_null(&nmrep);
3607
3608 // PUTFH, GETATTR, LOCK
3609 numops = 3;
3610 nfsm_chain_build_alloc_init(error, &nmreq, 33 * NFSX_UNSIGNED);
3611 nfsm_chain_add_compound_header(error, &nmreq, "lock", nmp->nm_minor_vers, numops);
3612 numops--;
3613 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3614 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3615 numops--;
3616 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3617 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3618 numops--;
3619 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCK);
3620 nfsm_chain_add_32(error, &nmreq, locktype);
3621 nfsm_chain_add_32(error, &nmreq, reclaim);
3622 nfsm_chain_add_64(error, &nmreq, nflp->nfl_start);
3623 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(nflp->nfl_start, nflp->nfl_end));
3624 nfsm_chain_add_32(error, &nmreq, newlocker);
3625 if (newlocker) {
3626 nfsm_chain_add_32(error, &nmreq, nofp->nof_owner->noo_seqid);
3627 nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
3628 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3629 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3630 } else {
3631 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3632 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3633 }
3634 nfsm_chain_build_done(error, &nmreq);
3635 nfsm_assert(error, (numops == 0), EPROTO);
3636 nfsmout_if(error);
3637
3638 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3639
3640 if ((lockerror = nfs_node_lock(np))) {
3641 error = lockerror;
3642 }
3643 nfsm_chain_skip_tag(error, &nmrep);
3644 nfsm_chain_get_32(error, &nmrep, numops);
3645 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3646 nfsmout_if(error);
3647 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3648 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3649 nfsmout_if(error);
3650 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCK);
3651 nfs_owner_seqid_increment(newlocker ? nofp->nof_owner : NULL, nlop, error);
3652 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3653
3654 /* Update the lock owner's stategenid once it appears the server has state for it. */
3655 /* We determine this by noting the request was successful (we got a stateid). */
3656 if (newlocker && !error) {
3657 nlop->nlo_stategenid = nmp->nm_stategenid;
3658 }
3659 nfsmout:
3660 if (!lockerror) {
3661 nfs_node_unlock(np);
3662 }
3663 nfs_lock_owner_clear_busy(nlop);
3664 if (newlocker) {
3665 nfs_open_owner_clear_busy(nofp->nof_owner);
3666 nfs_open_file_clear_busy(nofp);
3667 }
3668 nfsm_chain_cleanup(&nmreq);
3669 nfsm_chain_cleanup(&nmrep);
3670 return error;
3671 }
3672
3673 /*
3674 * Send an NFSv4 LOCKU RPC to the server.
3675 */
3676 int
3677 nfs4_unlock_rpc(
3678 nfsnode_t np,
3679 struct nfs_lock_owner *nlop,
3680 int type,
3681 uint64_t start,
3682 uint64_t end,
3683 int flags,
3684 thread_t thd,
3685 kauth_cred_t cred)
3686 {
3687 struct nfsmount *nmp;
3688 struct nfsm_chain nmreq, nmrep;
3689 uint64_t xid;
3690 int error = 0, lockerror = ENOENT, numops, status;
3691 struct nfsreq_secinfo_args si;
3692
3693 nmp = NFSTONMP(np);
3694 if (nfs_mount_gone(nmp)) {
3695 return ENXIO;
3696 }
3697 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3698 return EINVAL;
3699 }
3700
3701 error = nfs_lock_owner_set_busy(nlop, NULL);
3702 if (error) {
3703 return error;
3704 }
3705
3706 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3707 nfsm_chain_null(&nmreq);
3708 nfsm_chain_null(&nmrep);
3709
3710 // PUTFH, GETATTR, LOCKU
3711 numops = 3;
3712 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3713 nfsm_chain_add_compound_header(error, &nmreq, "unlock", nmp->nm_minor_vers, numops);
3714 numops--;
3715 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3716 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3717 numops--;
3718 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3719 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3720 numops--;
3721 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKU);
3722 nfsm_chain_add_32(error, &nmreq, (type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3723 nfsm_chain_add_32(error, &nmreq, nlop->nlo_seqid);
3724 nfsm_chain_add_stateid(error, &nmreq, &nlop->nlo_stateid);
3725 nfsm_chain_add_64(error, &nmreq, start);
3726 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3727 nfsm_chain_build_done(error, &nmreq);
3728 nfsm_assert(error, (numops == 0), EPROTO);
3729 nfsmout_if(error);
3730
3731 error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);
3732
3733 if ((lockerror = nfs_node_lock(np))) {
3734 error = lockerror;
3735 }
3736 nfsm_chain_skip_tag(error, &nmrep);
3737 nfsm_chain_get_32(error, &nmrep, numops);
3738 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3739 nfsmout_if(error);
3740 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3741 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3742 nfsmout_if(error);
3743 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKU);
3744 nfs_owner_seqid_increment(NULL, nlop, error);
3745 nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid);
3746 nfsmout:
3747 if (!lockerror) {
3748 nfs_node_unlock(np);
3749 }
3750 nfs_lock_owner_clear_busy(nlop);
3751 nfsm_chain_cleanup(&nmreq);
3752 nfsm_chain_cleanup(&nmrep);
3753 return error;
3754 }
3755
3756 /*
3757 * Send an NFSv4 LOCKT RPC to the server.
3758 */
3759 int
3760 nfs4_getlock_rpc(
3761 nfsnode_t np,
3762 struct nfs_lock_owner *nlop,
3763 struct flock *fl,
3764 uint64_t start,
3765 uint64_t end,
3766 vfs_context_t ctx)
3767 {
3768 struct nfsmount *nmp;
3769 struct nfsm_chain nmreq, nmrep;
3770 uint64_t xid, val64 = 0;
3771 uint32_t val = 0;
3772 int error = 0, lockerror, numops, status;
3773 struct nfsreq_secinfo_args si;
3774
3775 nmp = NFSTONMP(np);
3776 if (nfs_mount_gone(nmp)) {
3777 return ENXIO;
3778 }
3779 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
3780 return EINVAL;
3781 }
3782
3783 lockerror = ENOENT;
3784 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
3785 nfsm_chain_null(&nmreq);
3786 nfsm_chain_null(&nmrep);
3787
3788 // PUTFH, GETATTR, LOCKT
3789 numops = 3;
3790 nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED);
3791 nfsm_chain_add_compound_header(error, &nmreq, "locktest", nmp->nm_minor_vers, numops);
3792 numops--;
3793 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
3794 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, np->n_fhp, np->n_fhsize);
3795 numops--;
3796 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
3797 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
3798 numops--;
3799 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOCKT);
3800 nfsm_chain_add_32(error, &nmreq, (fl->l_type == F_WRLCK) ? NFS_LOCK_TYPE_WRITE : NFS_LOCK_TYPE_READ);
3801 nfsm_chain_add_64(error, &nmreq, start);
3802 nfsm_chain_add_64(error, &nmreq, NFS_LOCK_LENGTH(start, end));
3803 nfsm_chain_add_lock_owner4(error, &nmreq, nmp, nlop);
3804 nfsm_chain_build_done(error, &nmreq);
3805 nfsm_assert(error, (numops == 0), EPROTO);
3806 nfsmout_if(error);
3807
3808 error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);
3809
3810 if ((lockerror = nfs_node_lock(np))) {
3811 error = lockerror;
3812 }
3813 nfsm_chain_skip_tag(error, &nmrep);
3814 nfsm_chain_get_32(error, &nmrep, numops);
3815 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
3816 nfsmout_if(error);
3817 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
3818 nfsm_chain_loadattr(error, &nmrep, np, NFS_VER4, &xid);
3819 nfsmout_if(error);
3820 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOCKT);
3821 if (error == NFSERR_DENIED) {
3822 error = 0;
3823 nfsm_chain_get_64(error, &nmrep, fl->l_start);
3824 nfsm_chain_get_64(error, &nmrep, val64);
3825 fl->l_len = (val64 == UINT64_MAX) ? 0 : val64;
3826 nfsm_chain_get_32(error, &nmrep, val);
3827 fl->l_type = (val == NFS_LOCK_TYPE_WRITE) ? F_WRLCK : F_RDLCK;
3828 fl->l_pid = 0;
3829 fl->l_whence = SEEK_SET;
3830 } else if (!error) {
3831 fl->l_type = F_UNLCK;
3832 }
3833 nfsmout:
3834 if (!lockerror) {
3835 nfs_node_unlock(np);
3836 }
3837 nfsm_chain_cleanup(&nmreq);
3838 nfsm_chain_cleanup(&nmrep);
3839 return error;
3840 }
3841 #endif /* CONFIG_NFS4 */
3842
3843 /*
3844 * Check for any conflicts with the given lock.
3845 *
3846 * Checking for a lock doesn't require the file to be opened.
3847 * So we skip all the open owner, open file, lock owner work
3848 * and just check for a conflicting lock.
3849 */
3850 int
3851 nfs_advlock_getlock(
3852 nfsnode_t np,
3853 struct nfs_lock_owner *nlop,
3854 struct flock *fl,
3855 uint64_t start,
3856 uint64_t end,
3857 vfs_context_t ctx)
3858 {
3859 struct nfsmount *nmp;
3860 struct nfs_file_lock *nflp;
3861 int error = 0, answered = 0;
3862
3863 nmp = NFSTONMP(np);
3864 if (nfs_mount_gone(nmp)) {
3865 return ENXIO;
3866 }
3867
3868 restart:
3869 if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
3870 return error;
3871 }
3872
3873 lck_mtx_lock(&np->n_openlock);
3874 /* scan currently held locks for conflict */
3875 TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) {
3876 if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
3877 continue;
3878 }
3879 if ((start <= nflp->nfl_end) && (end >= nflp->nfl_start) &&
3880 ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) {
3881 break;
3882 }
3883 }
3884 if (nflp) {
3885 /* found a conflicting lock */
3886 fl->l_type = nflp->nfl_type;
3887 fl->l_pid = (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_FLOCK) ? -1 : nflp->nfl_owner->nlo_pid;
3888 fl->l_start = nflp->nfl_start;
3889 fl->l_len = NFS_FLOCK_LENGTH(nflp->nfl_start, nflp->nfl_end);
3890 fl->l_whence = SEEK_SET;
3891 answered = 1;
3892 } else if ((np->n_openflags & N_DELEG_WRITE) && !(np->n_openflags & N_DELEG_RETURN)) {
3893 /*
3894 * If we have a write delegation, we know there can't be other
3895 * locks on the server. So the answer is no conflicting lock found.
3896 */
3897 fl->l_type = F_UNLCK;
3898 answered = 1;
3899 }
3900 lck_mtx_unlock(&np->n_openlock);
3901 if (answered) {
3902 nfs_mount_state_in_use_end(nmp, 0);
3903 return 0;
3904 }
3905
3906 /* no conflict found locally, so ask the server */
3907 error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx);
3908
3909 if (nfs_mount_state_in_use_end(nmp, error)) {
3910 goto restart;
3911 }
3912 return error;
3913 }
3914
/*
 * Acquire a file lock for the given range.
 *
 * Add the lock (request) to the lock queue.
 * Scan the lock queue for any conflicting locks.
 * If a conflict is found, block or return an error.
 * Once end of queue is reached, send request to the server.
 * If the server grants the lock, scan the lock queue and
 * update any existing locks. Then (optionally) scan the
 * queue again to coalesce any locks adjacent to the new one.
 *
 * np      - NFS node (file) being locked
 * nofp    - open file state this lock is associated with
 * nlop    - lock owner making the request
 * op      - F_SETLK (fail with EAGAIN on conflict) or F_SETLKW (wait)
 * start   - first byte of range (inclusive)
 * end     - last byte of range (inclusive; UINT64_MAX = "to end of file")
 * style   - NFS_FILE_LOCK_STYLE_POSIX or NFS_FILE_LOCK_STYLE_FLOCK
 * type    - F_RDLCK or F_WRLCK
 *
 * Returns 0 on success, or EINVAL/ENOLCK/EAGAIN/EIO/ENXIO or an NFS
 * error from the lock RPC on failure.
 */
int
nfs_advlock_setlock(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	struct nfs_lock_owner *nlop,
	int op,
	uint64_t start,
	uint64_t end,
	int style,
	short type,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *newnflp, *nflp, *nflp2 = NULL, *nextnflp, *flocknflp = NULL;
	struct nfs_file_lock *coalnflp;
	/*
	 * Bookkeeping flags used for cleanup on the error/restart paths:
	 * busy    - we hold the node's open-state busy (N_OPENBUSY)
	 * inuse   - we hold a mount-state "in use" reference
	 * inqueue - newnflp has been inserted on np->n_locks
	 */
	int error = 0, error2, willsplit = 0, delay, slpflag, busy = 0, inuse = 0, restart, inqueue = 0;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;

	if ((type != F_RDLCK) && (type != F_WRLCK)) {
		return EINVAL;
	}

	/* allocate a new lock */
	newnflp = nfs_file_lock_alloc(nlop);
	if (!newnflp) {
		return ENOLCK;
	}
	newnflp->nfl_start = start;
	newnflp->nfl_end = end;
	newnflp->nfl_type = type;
	if (op == F_SETLKW) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT;
	}
	newnflp->nfl_flags |= style;
	/* marked BLOCKED until the server actually grants it */
	newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED;

	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && (type == F_WRLCK)) {
		/*
		 * For exclusive flock-style locks, if we block waiting for the
		 * lock, we need to first release any currently held shared
		 * flock-style lock. So, the first thing we do is check if we
		 * have a shared flock-style lock.
		 */
		nflp = TAILQ_FIRST(&nlop->nlo_locks);
		if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) {
			nflp = NULL;
		}
		if (nflp && (nflp->nfl_type != F_RDLCK)) {
			nflp = NULL;
		}
		/* flocknflp non-NULL => drop this shared flock before sleeping */
		flocknflp = nflp;
	}

restart:
	/* (Re)establish the mount-state "in use" reference; we come back here
	 * after a reopen or when server recovery is pending. */
	restart = 0;
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		goto error_out;
	}
	inuse = 1;
	if (np->n_flag & NREVOKE) {
		error = EIO;
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		goto error_out;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* open state was lost; re-establish it before locking */
		nfs_mount_state_in_use_end(nmp, 0);
		inuse = 0;
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		if (error) {
			goto error_out;
		}
		goto restart;
	}
#endif

	lck_mtx_lock(&np->n_openlock);
	if (!inqueue) {
		/* insert new lock at beginning of list */
		TAILQ_INSERT_HEAD(&np->n_locks, newnflp, nfl_link);
		inqueue = 1;
	}

	/* scan current list of locks (held and pending) for conflicts */
	for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) {
		nextnflp = TAILQ_NEXT(nflp, nfl_link);
		if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) {
			continue;
		}
		/* Conflict */
		if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		/* Block until this lock is no longer held. */
		if (nflp->nfl_blockcnt == UINT_MAX) {
			error = ENOLCK;
			break;
		}
		/* blockcnt keeps nflp alive while we sleep on it */
		nflp->nfl_blockcnt++;
		do {
			if (flocknflp) {
				/* release any currently held shared lock before sleeping */
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
				flocknflp = NULL;
				if (!error) {
					error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
				}
				if (error) {
					lck_mtx_lock(&np->n_openlock);
					break;
				}
				inuse = 1;
				lck_mtx_lock(&np->n_openlock);
				/* no need to block/sleep if the conflict is gone */
				if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) {
					break;
				}
			}
			/* msleep drops and reacquires n_openlock around the nap */
			msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts);
			slpflag = 0;
			error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
			if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
				/* looks like we have a recover pending... restart */
				restart = 1;
				lck_mtx_unlock(&np->n_openlock);
				nfs_mount_state_in_use_end(nmp, 0);
				inuse = 0;
				lck_mtx_lock(&np->n_openlock);
				break;
			}
			if (!error && (np->n_flag & NREVOKE)) {
				error = EIO;
			}
		} while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL));
		nflp->nfl_blockcnt--;
		if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !nflp->nfl_blockcnt) {
			/* we were the last waiter on a dead lock - reap it */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		if (error || restart) {
			break;
		}
		/* We have released n_openlock and we can't trust that nextnflp is still valid. */
		/* So, start this lock-scanning loop over from where it started. */
		nextnflp = TAILQ_NEXT(newnflp, nfl_link);
	}
	lck_mtx_unlock(&np->n_openlock);
	if (restart) {
		goto restart;
	}
	if (error) {
		goto error_out;
	}

	if (willsplit) {
		/*
		 * It looks like this operation is splitting a lock.
		 * We allocate a new lock now so we don't have to worry
		 * about the allocation failing after we've updated some state.
		 */
		nflp2 = nfs_file_lock_alloc(nlop);
		if (!nflp2) {
			error = ENOLCK;
			goto error_out;
		}
	}

	/* once scan for local conflicts is clear, send request to server */
	if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) {
		goto error_out;
	}
	busy = 1;
	delay = 0;
	do {
#if CONFIG_NFS4
		/* do we have a delegation? (that we're not returning?) */
		if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN)) {
			if (np->n_openflags & N_DELEG_WRITE) {
				/* with a write delegation, just take the lock delegated */
				newnflp->nfl_flags |= NFS_FILE_LOCK_DELEGATED;
				error = 0;
				/* make sure the lock owner knows its open owner */
				if (!nlop->nlo_open_owner) {
					nfs_open_owner_ref(nofp->nof_owner);
					nlop->nlo_open_owner = nofp->nof_owner;
				}
				break;
			} else {
				/*
				 * If we don't have any non-delegated opens but we do have
				 * delegated opens, then we need to first claim the delegated
				 * opens so that the lock request on the server can be associated
				 * with an open it knows about.
				 */
				if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw &&
				    !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw &&
				    !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) &&
				    (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw ||
				    nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw ||
				    nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) {
					error = nfs4_claim_delegated_state_for_open_file(nofp, 0);
					if (error) {
						break;
					}
				}
			}
		}
#endif
		if (np->n_flag & NREVOKE) {
			error = EIO;
		}
		if (!error) {
			error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx));
		}
		/* retry only if denied (conflict) or server is in its grace period */
		if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) {
			break;
		}
		/* request was denied due to either conflict or grace period */
		if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) {
			error = EAGAIN;
			break;
		}
		if (flocknflp) {
			/* release any currently held shared lock before sleeping */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx);
			flocknflp = NULL;
			if (!error2) {
				error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
			}
			if (!error2) {
				inuse = 1;
				error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx));
			}
			if (error2) {
				error = error2;
				break;
			}
			busy = 1;
		}
		/*
		 * Wait a little bit and send the request again.
		 * Except for retries of blocked v2/v3 request where we've already waited a bit.
		 */
		if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) {
			if (error == NFSERR_GRACE) {
				delay = 4;
			}
			if (delay < 4) {
				delay++;
			}
			/* back off: up to 4 * (hz/2) ticks between retries */
			tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2));
			slpflag = 0;
		}
		error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0);
		if (!error && (nmp->nm_state & NFSSTA_RECOVER)) {
			/* looks like we have a recover pending... restart */
			nfs_open_state_clear_busy(np);
			busy = 0;
			nfs_mount_state_in_use_end(nmp, 0);
			inuse = 0;
			goto restart;
		}
		if (!error && (np->n_flag & NREVOKE)) {
			error = EIO;
		}
	} while (!error);

error_out:
	if (nfs_mount_state_error_should_restart(error)) {
		/* looks like we need to restart this operation */
		if (busy) {
			nfs_open_state_clear_busy(np);
			busy = 0;
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
			inuse = 0;
		}
		goto restart;
	}
	lck_mtx_lock(&np->n_openlock);
	newnflp->nfl_flags &= ~NFS_FILE_LOCK_BLOCKED;
	if (error) {
		newnflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		if (newnflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(newnflp);
		} else {
			/* remove newnflp from lock list and destroy */
			if (inqueue) {
				TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link);
			}
			nfs_file_lock_destroy(newnflp);
		}
		lck_mtx_unlock(&np->n_openlock);
		if (busy) {
			nfs_open_state_clear_busy(np);
		}
		if (inuse) {
			nfs_mount_state_in_use_end(nmp, error);
		}
		if (nflp2) {
			nfs_file_lock_destroy(nflp2);
		}
		return error;
	}

	/* server granted the lock */

	/*
	 * Scan for locks to update.
	 *
	 * Locks completely covered are killed.
	 * At most two locks may need to be clipped.
	 * It's possible that a single lock may need to be split.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp == newnflp) {
			continue;
		}
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) {
			continue;
		}
		if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to update */
		if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) {
			/* The entire lock is being replaced. */
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((newnflp->nfl_start > nflp->nfl_start) && (newnflp->nfl_end < nflp->nfl_end)) {
			/* We're replacing a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			/* Update locks and insert new lock after current lock. */
			nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			nflp2->nfl_type = nflp->nfl_type;
			nflp2->nfl_start = newnflp->nfl_end + 1;
			nflp2->nfl_end = nflp->nfl_end;
			nflp->nfl_end = newnflp->nfl_start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, nflp2, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, nflp2);
			nextnflp = nflp2;
			nflp2 = NULL;
		} else if (newnflp->nfl_start > nflp->nfl_start) {
			/* We're replacing the end of a lock. */
			nflp->nfl_end = newnflp->nfl_start - 1;
		} else if (newnflp->nfl_end < nflp->nfl_end) {
			/* We're replacing the start of a lock. */
			nflp->nfl_start = newnflp->nfl_end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}

	nfs_lock_owner_insert_held_lock(nlop, newnflp);

	/*
	 * POSIX locks should be coalesced when possible.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) && (nofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK)) {
		/*
		 * Walk through the lock queue and check each of our held locks with
		 * the previous and next locks in the lock owner's "held lock list".
		 * If the two locks can be coalesced, we merge the current lock into
		 * the other (previous or next) lock. Merging this way makes sure that
		 * lock ranges are always merged forward in the lock queue. This is
		 * important because anyone blocked on the lock being "merged away"
		 * will still need to block on that range and it will simply continue
		 * checking locks that are further down the list.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) {
				continue;
			}
			if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_end == (nflp->nfl_start - 1))) {
				/* previous lock abuts this one - extend it forward */
				coalnflp->nfl_end = nflp->nfl_end;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			} else if (((coalnflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
			    ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) &&
			    (coalnflp->nfl_type == nflp->nfl_type) &&
			    (coalnflp->nfl_start == (nflp->nfl_end + 1))) {
				/* next lock abuts this one - extend it backward */
				coalnflp->nfl_start = nflp->nfl_start;
				nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
				lck_mtx_lock(&nlop->nlo_lock);
				TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
				lck_mtx_unlock(&nlop->nlo_lock);
			}
			if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_blockcnt) {
				/* wake up anyone blocked on this lock */
				wakeup(nflp);
			} else {
				/* remove nflp from lock list and destroy */
				TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
				nfs_file_lock_destroy(nflp);
			}
		}
	}

	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, error);

	if (nflp2) {
		nfs_file_lock_destroy(nflp2);
	}
	return error;
}
4380
/*
 * Release all (same style) locks within the given range.
 *
 * np    - NFS node (file) being unlocked
 * nofp  - open file state (used only for the CONFIG_NFS4 reopen check)
 * nlop  - lock owner whose locks are being released
 * start - first byte of range (inclusive)
 * end   - last byte of range (inclusive; UINT64_MAX = "to end of file")
 * style - NFS_FILE_LOCK_STYLE_POSIX or NFS_FILE_LOCK_STYLE_FLOCK
 *
 * Unlock RPCs are sent to the server with n_openlock dropped; the
 * N_OPENBUSY state keeps *held* locks stable in the meantime.
 * Returns 0 on success or an errno / NFS error.
 */
int
nfs_advlock_unlock(
	nfsnode_t np,
	struct nfs_open_file *nofp
#if !CONFIG_NFS4
	__unused
#endif
	,
	struct nfs_lock_owner *nlop,
	uint64_t start,
	uint64_t end,
	int style,
	vfs_context_t ctx)
{
	struct nfsmount *nmp;
	struct nfs_file_lock *nflp, *nextnflp, *newnflp = NULL;
	int error = 0, willsplit = 0, send_unlock_rpcs = 1;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

restart:
	/* (re)acquire mount-state "in use"; we come back here after a reopen
	 * or a restartable RPC error */
	if ((error = nfs_mount_state_in_use_start(nmp, NULL))) {
		return error;
	}
#if CONFIG_NFS4
	if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) {
		/* open state was lost; re-establish it before unlocking */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(nofp, NULL);
		if (error) {
			return error;
		}
		goto restart;
	}
#endif
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		nfs_mount_state_in_use_end(nmp, error);
		return error;
	}

	lck_mtx_lock(&np->n_openlock);
	if ((start > 0) && (end < UINT64_MAX) && !willsplit) {
		/*
		 * We may need to allocate a new lock if an existing lock gets split.
		 * So, we first scan the list to check for a split, and if there's
		 * going to be one, we'll allocate one now.
		 */
		TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
			if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
				continue;
			}
			if (nflp->nfl_owner != nlop) {
				continue;
			}
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
				continue;
			}
			if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
				continue;
			}
			/* unlock range is strictly inside this lock => split */
			if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
				willsplit = 1;
				break;
			}
		}
		if (willsplit) {
			/* drop everything, allocate, and restart with the lock in hand */
			lck_mtx_unlock(&np->n_openlock);
			nfs_open_state_clear_busy(np);
			nfs_mount_state_in_use_end(nmp, 0);
			newnflp = nfs_file_lock_alloc(nlop);
			if (!newnflp) {
				return ENOMEM;
			}
			goto restart;
		}
	}

	/*
	 * Free all of our locks in the given range.
	 *
	 * Note that this process requires sending requests to the server.
	 * Because of this, we will release the n_openlock while performing
	 * the unlock RPCs. The N_OPENBUSY state keeps the state of *held*
	 * locks from changing underneath us. However, other entries in the
	 * list may be removed. So we need to be careful walking the list.
	 */

	/*
	 * Don't unlock ranges that are held by other-style locks.
	 * If style is posix, don't send any unlock rpcs if flock is held.
	 * If we unlock an flock, don't send unlock rpcs for any posix-style
	 * ranges held - instead send unlocks for the ranges not held.
	 */
	if ((style == NFS_FILE_LOCK_STYLE_POSIX) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) {
		send_unlock_rpcs = 0;
	}
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) &&
	    ((nflp = TAILQ_NEXT(nflp, nfl_lolink))) &&
	    ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX)) {
		/* send unlock RPCs only for the gaps between held posix ranges */
		uint64_t s = 0;
		int type = TAILQ_FIRST(&nlop->nlo_locks)->nfl_type;
		int delegated = (TAILQ_FIRST(&nlop->nlo_locks)->nfl_flags & NFS_FILE_LOCK_DELEGATED);
		while (!delegated && nflp) {
			if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) {
				/* unlock the range preceding this lock */
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
				if (error) {
					goto out;
				}
				s = nflp->nfl_end + 1;
			}
			nflp = TAILQ_NEXT(nflp, nfl_lolink);
		}
		if (!delegated) {
			/* unlock the tail: from the last held range to the end */
			lck_mtx_unlock(&np->n_openlock);
			error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0,
			    vfs_context_thread(ctx), vfs_context_ucred(ctx));
			if (nfs_mount_state_error_should_restart(error)) {
				nfs_open_state_clear_busy(np);
				nfs_mount_state_in_use_end(nmp, error);
				goto restart;
			}
			lck_mtx_lock(&np->n_openlock);
			if (error) {
				goto out;
			}
		}
		send_unlock_rpcs = 0;
	}

	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) {
			continue;
		}
		if (nflp->nfl_owner != nlop) {
			continue;
		}
		if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) {
			continue;
		}
		if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) {
			continue;
		}
		/* here's one to unlock */
		if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) {
			/* The entire lock is being unlocked. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			/* refetch next - the list may have changed while unlocked */
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
			lck_mtx_lock(&nlop->nlo_lock);
			TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink);
			lck_mtx_unlock(&nlop->nlo_lock);
			/* lock will be destroyed below, if no waiters */
		} else if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) {
			/* We're unlocking a range in the middle of a lock. */
			/* The current lock will be split into two locks. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			if (error) {
				break;
			}
			/* update locks and insert new lock after current lock */
			newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED));
			newnflp->nfl_type = nflp->nfl_type;
			newnflp->nfl_start = end + 1;
			newnflp->nfl_end = nflp->nfl_end;
			nflp->nfl_end = start - 1;
			TAILQ_INSERT_AFTER(&np->n_locks, nflp, newnflp, nfl_link);
			nfs_lock_owner_insert_held_lock(nlop, newnflp);
			nextnflp = newnflp;
			newnflp = NULL;
		} else if (start > nflp->nfl_start) {
			/* We're unlocking the end of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_end = start - 1;
		} else if (end < nflp->nfl_end) {
			/* We're unlocking the start of a lock. */
			if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
				lck_mtx_unlock(&np->n_openlock);
				error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0,
				    vfs_context_thread(ctx), vfs_context_ucred(ctx));
				if (nfs_mount_state_error_should_restart(error)) {
					nfs_open_state_clear_busy(np);
					nfs_mount_state_in_use_end(nmp, error);
					goto restart;
				}
				lck_mtx_lock(&np->n_openlock);
			}
			nextnflp = TAILQ_NEXT(nflp, nfl_link);
			if (error) {
				break;
			}
			nflp->nfl_start = end + 1;
		}
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else if (nflp->nfl_flags & NFS_FILE_LOCK_DEAD) {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
	}
out:
	lck_mtx_unlock(&np->n_openlock);
	nfs_open_state_clear_busy(np);
	nfs_mount_state_in_use_end(nmp, 0);

	if (newnflp) {
		/* allocated for a split that never happened */
		nfs_file_lock_destroy(newnflp);
	}
	return error;
}
4647
/*
 * NFSv4 advisory file locking
 *
 * VNOP entry point for F_GETLK / F_SETLK / F_SETLKW / F_UNLCK.
 * Validates the request, converts the struct flock into an inclusive
 * [start, end] byte range, finds the lock owner (and, for lock/unlock,
 * the open owner and open file), then dispatches to
 * nfs_advlock_getlock / nfs_advlock_setlock / nfs_advlock_unlock.
 */
int
nfs_vnop_advlock(
	struct vnop_advlock_args /* {
                                  *  struct vnodeop_desc *a_desc;
                                  *  vnode_t a_vp;
                                  *  caddr_t a_id;
                                  *  int a_op;
                                  *  struct flock *a_fl;
                                  *  int a_flags;
                                  *  vfs_context_t a_context;
                                  *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	nfsnode_t np = VTONFS(ap->a_vp);
	struct flock *fl = ap->a_fl;
	int op = ap->a_op;
	int flags = ap->a_flags;
	vfs_context_t ctx = ap->a_context;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *nofp = NULL;
	struct nfs_lock_owner *nlop = NULL;
	off_t lstart;
	uint64_t start, end;
	int error = 0, modified, style;
	enum vtype vtype;
#define OFF_MAX QUAD_MAX

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	/* v2/v3 mounts may have locking disabled entirely */
	lck_mtx_lock(&nmp->nm_lock);
	if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return ENOTSUP;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (np->n_flag & NREVOKE) {
		return EIO;
	}
	vtype = vnode_vtype(ap->a_vp);
	if (vtype == VDIR) { /* ignore lock requests on directories */
		return 0;
	}
	if (vtype != VREG) { /* anything other than regular files is invalid */
		return EINVAL;
	}

	/* Convert the flock structure into a start and end. */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * to fl->l_start when SEEK_CUR is used.
		 */
		lstart = fl->l_start;
		break;
	case SEEK_END:
		/* need to flush, and refetch attributes to make */
		/* sure we have the correct end of file offset */
		if ((error = nfs_node_lock(np))) {
			return error;
		}
		modified = (np->n_flag & NMODIFIED);
		nfs_node_unlock(np);
		if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) {
			return error;
		}
		if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) {
			return error;
		}
		nfs_data_lock(np, NFS_DATA_LOCK_SHARED);
		/* guard against off_t overflow computing size + l_start */
		if ((np->n_size > OFF_MAX) ||
		    ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) {
			error = EOVERFLOW;
		}
		lstart = np->n_size + fl->l_start;
		nfs_data_unlock(np);
		if (error) {
			return error;
		}
		break;
	default:
		return EINVAL;
	}
	if (lstart < 0) {
		return EINVAL;
	}
	start = lstart;
	if (fl->l_len == 0) {
		/* l_len == 0 means "to end of file" */
		end = UINT64_MAX;
	} else if (fl->l_len > 0) {
		if ((fl->l_len - 1) > (OFF_MAX - lstart)) {
			return EOVERFLOW;
		}
		end = start - 1 + fl->l_len;
	} else { /* l_len is negative */
		if ((lstart + fl->l_len) < 0) {
			return EINVAL;
		}
		end = start - 1;
		start += fl->l_len;
	}
	/* NFSv2 offsets are 32-bit only */
	if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) {
		return EINVAL;
	}

	style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX;
	/* flock-style locks always cover the whole file */
	if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) {
		return EINVAL;
	}

	/* find the lock owner, alloc if not unlock */
	nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK));
	if (!nlop) {
		/* unlocking with no lock owner is a successful no-op */
		error = (op == F_UNLCK) ? 0 : ENOMEM;
		if (error) {
			NP(np, "nfs_vnop_advlock: no lock owner, error %d", error);
		}
		goto out;
	}

	if (op == F_GETLK) {
		error = nfs_advlock_getlock(np, nlop, fl, start, end, ctx);
	} else {
		/* find the open owner */
		noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0);
		if (!noop) {
			NP(np, "nfs_vnop_advlock: no open owner %d", kauth_cred_getuid(vfs_context_ucred(ctx)));
			error = EPERM;
			goto out;
		}
		/* find the open file */
#if CONFIG_NFS4
restart:
#endif
		error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
		if (error) {
			error = EBADF;
		}
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
			NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred));
			error = EIO;
		}
#if CONFIG_NFS4
		if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
			/* lost open state - reopen, then retry the lookup */
			error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx)));
			nofp = NULL;
			if (!error) {
				goto restart;
			}
		}
#endif
		if (error) {
			NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred));
			goto out;
		}
		if (op == F_UNLCK) {
			error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx);
		} else if ((op == F_SETLK) || (op == F_SETLKW)) {
			if ((op == F_SETLK) && (flags & F_WAIT)) {
				op = F_SETLKW;
			}
			error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx);
		} else {
			/* not getlk, unlock or lock? */
			error = EINVAL;
		}
	}

out:
	if (nlop) {
		nfs_lock_owner_rele(nlop);
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
4833
4834 /*
4835 * Check if an open owner holds any locks on a file.
4836 */
4837 int
4838 nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp)
4839 {
4840 struct nfs_lock_owner *nlop;
4841
4842 TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
4843 if (nlop->nlo_open_owner != noop) {
4844 continue;
4845 }
4846 if (!TAILQ_EMPTY(&nlop->nlo_locks)) {
4847 break;
4848 }
4849 }
4850 return nlop ? 1 : 0;
4851 }
4852
4853 #if CONFIG_NFS4
4854 /*
4855 * Reopen simple (no deny, no locks) open state that was lost.
4856 */
4857 int
4858 nfs4_reopen(struct nfs_open_file *nofp, thread_t thd)
4859 {
4860 struct nfs_open_owner *noop = nofp->nof_owner;
4861 struct nfsmount *nmp = NFSTONMP(nofp->nof_np);
4862 nfsnode_t np = nofp->nof_np;
4863 vnode_t vp = NFSTOV(np);
4864 vnode_t dvp = NULL;
4865 struct componentname cn;
4866 const char *vname = NULL;
4867 const char *name = NULL;
4868 size_t namelen;
4869 char smallname[128];
4870 char *filename = NULL;
4871 int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
4872 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4873
4874 lck_mtx_lock(&nofp->nof_lock);
4875 while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) {
4876 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
4877 break;
4878 }
4879 msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts);
4880 slpflag = 0;
4881 }
4882 if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
4883 lck_mtx_unlock(&nofp->nof_lock);
4884 return error;
4885 }
4886 nofp->nof_flags |= NFS_OPEN_FILE_REOPENING;
4887 lck_mtx_unlock(&nofp->nof_lock);
4888
4889 nfs_node_lock_force(np);
4890 if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) {
4891 /*
4892 * The node's been sillyrenamed, so we need to use
4893 * the sillyrename directory/name to do the open.
4894 */
4895 struct nfs_sillyrename *nsp = np->n_sillyrename;
4896 dvp = NFSTOV(nsp->nsr_dnp);
4897 if ((error = vnode_get(dvp))) {
4898 dvp = NULLVP;
4899 nfs_node_unlock(np);
4900 goto out;
4901 }
4902 name = nsp->nsr_name;
4903 } else {
4904 /*
4905 * [sigh] We can't trust VFS to get the parent right for named
4906 * attribute nodes. (It likes to reparent the nodes after we've
4907 * created them.) Luckily we can probably get the right parent
4908 * from the n_parent we have stashed away.
4909 */
4910 if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
4911 (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
4912 dvp = NULL;
4913 }
4914 if (!dvp) {
4915 dvp = vnode_getparent(vp);
4916 }
4917 vname = vnode_getname(vp);
4918 if (!dvp || !vname) {
4919 if (!error) {
4920 error = EIO;
4921 }
4922 nfs_node_unlock(np);
4923 goto out;
4924 }
4925 name = vname;
4926 }
4927 filename = &smallname[0];
4928 namelen = snprintf(filename, sizeof(smallname), "%s", name);
4929 if (namelen >= sizeof(smallname)) {
4930 MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
4931 if (!filename) {
4932 error = ENOMEM;
4933 goto out;
4934 }
4935 snprintf(filename, namelen + 1, "%s", name);
4936 }
4937 nfs_node_unlock(np);
4938 bzero(&cn, sizeof(cn));
4939 cn.cn_nameptr = filename;
4940 cn.cn_namelen = namelen;
4941
4942 restart:
4943 done = 0;
4944 if ((error = nfs_mount_state_in_use_start(nmp, thd))) {
4945 goto out;
4946 }
4947
4948 if (nofp->nof_rw) {
4949 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE);
4950 }
4951 if (!error && nofp->nof_w) {
4952 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE);
4953 }
4954 if (!error && nofp->nof_r) {
4955 error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE);
4956 }
4957
4958 if (nfs_mount_state_in_use_end(nmp, error)) {
4959 if (error == NFSERR_GRACE) {
4960 goto restart;
4961 }
4962 printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error,
4963 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4964 error = 0;
4965 goto out;
4966 }
4967 done = 1;
4968 out:
4969 if (error && (error != EINTR) && (error != ERESTART)) {
4970 nfs_revoke_open_state_for_node(np);
4971 }
4972 lck_mtx_lock(&nofp->nof_lock);
4973 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING;
4974 if (done) {
4975 nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
4976 } else if (error) {
4977 printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error,
4978 (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???");
4979 }
4980 lck_mtx_unlock(&nofp->nof_lock);
4981 if (filename && (filename != &smallname[0])) {
4982 FREE(filename, M_TEMP);
4983 }
4984 if (vname) {
4985 vnode_putname(vname);
4986 }
4987 if (dvp != NULLVP) {
4988 vnode_put(dvp);
4989 }
4990 return error;
4991 }
4992
4993 /*
4994 * Send a normal OPEN RPC to open/create a file.
4995 */
4996 int
4997 nfs4_open_rpc(
4998 struct nfs_open_file *nofp,
4999 vfs_context_t ctx,
5000 struct componentname *cnp,
5001 struct vnode_attr *vap,
5002 vnode_t dvp,
5003 vnode_t *vpp,
5004 int create,
5005 int share_access,
5006 int share_deny)
5007 {
5008 return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx),
5009 cnp, vap, dvp, vpp, create, share_access, share_deny);
5010 }
5011
5012 /*
5013 * Send an OPEN RPC to reopen a file.
5014 */
5015 int
5016 nfs4_open_reopen_rpc(
5017 struct nfs_open_file *nofp,
5018 thread_t thd,
5019 kauth_cred_t cred,
5020 struct componentname *cnp,
5021 vnode_t dvp,
5022 vnode_t *vpp,
5023 int share_access,
5024 int share_deny)
5025 {
5026 return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny);
5027 }
5028
5029 /*
5030 * Send an OPEN_CONFIRM RPC to confirm an OPEN.
5031 */
/*
 * Build and send a PUTFH+OPEN_CONFIRM+GETATTR compound for the file
 * identified by (fhp, fhlen).  On success the caller's stateid (*sid)
 * is replaced in place with the confirmed stateid from the reply and
 * the returned attributes are parsed into *nvap; *xidp receives the
 * transaction id of the request.  Returns 0 or an errno/NFS error.
 *
 * Note: the nfsm_* macros accumulate failures in "error" and become
 * no-ops once it is set, so the straight-line sequence below is safe.
 */
int
nfs4_open_confirm_rpc(
	struct nfsmount *nmp,
	nfsnode_t dnp,
	u_char *fhp,
	int fhlen,
	struct nfs_open_owner *noop,
	nfs_stateid *sid,
	thread_t thd,
	kauth_cred_t cred,
	struct nfs_vattr *nvap,
	uint64_t *xidp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status, numops;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_CONFIRM, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_confirm", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_CONFIRM);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	/* every op we declared in the header must have been emitted */
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_NOINTR: confirmation must not be interrupted mid-exchange */
	error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, &nmrep, xidp, &status);

	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_CONFIRM);
	/* advance the open owner's seqid based on the RPC outcome */
	nfs_owner_seqid_increment(noop, NULL, error);
	/* the confirmed stateid overwrites the caller's copy in place */
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
5087
5088 /*
5089 * common OPEN RPC code
5090 *
5091 * If create is set, ctx must be passed in.
5092 * Returns a node on success if no node passed in.
5093 */
/*
 * Common OPEN RPC code for open/create (CLAIM_NULL).
 *
 * Sends a PUTFH+SAVEFH+OPEN+GETATTR(FH)+RESTOREFH+GETATTR compound against
 * the parent directory dvp, looking up/creating cnp->cn_nameptr.  On a
 * successful create (or an open with no node passed in) a new nfsnode is
 * created from the returned filehandle/attributes and handed back in *vpp.
 * Any delegation granted by the server is either stashed in the node or
 * immediately returned.  The open stateid lands in nofp->nof_stateid (or a
 * local zero stateid when nofp is NULL).
 *
 * If create is set, ctx must be passed in (checked below); vap may carry
 * the create attributes, including VA_EXCLUSIVE for exclusive create.
 * Returns 0 or an errno/NFS error; the "nfsmout" label is the common
 * cleanup path used by the error-accumulating nfsm_* macros.
 */
int
nfs4_open_rpc_internal(
	struct nfs_open_file *nofp,
	vfs_context_t ctx,
	thread_t thd,
	kauth_cred_t cred,
	struct componentname *cnp,
	struct vnode_attr *vap,
	vnode_t dvp,
	vnode_t *vpp,
	int create,
	int share_access,
	int share_deny)
{
	struct nfsmount *nmp;
	/*
	 * NOTE(review): nofp is dereferenced here, yet a later "if (nofp)"
	 * implies NULL is possible — confirm callers always pass a non-NULL
	 * nofp, or the null check below is dead/misleading.
	 */
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
	int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid;
	u_int64_t xid, savedxid = 0;
	nfsnode_t dnp = VTONFS(dvp);
	nfsnode_t np, newnp = NULL;
	vnode_t newvp = NULL;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags, delegation, recall;
	struct nfs_stateid stateid, dstateid, *sid;
	fhandle_t fh;
	struct nfsreq rq, *req = &rq;
	struct nfs_dulookup dul;
	char sbuf[64], *s;
	uint32_t ace_type, ace_flags, ace_mask, len, slen;
	struct kauth_ace ace;
	struct nfsreq_secinfo_args si;

	/* creates need a VFS context (used for setattr fallback and dulookup) */
	if (create && !ctx) {
		return EINVAL;
	}

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
	/* referral trigger directories can't be opened through directly */
	if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	np = *vpp ? VTONFS(*vpp) : NULL;
	if (create && vap) {
		exclusive = (vap->va_vaflags & VA_EXCLUSIVE);
		nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
		/* remember whether uid/gid were requested, for the setattr retry below */
		gotuid = VATTR_IS_ACTIVE(vap, va_uid);
		gotgid = VATTR_IS_ACTIVE(vap, va_gid);
		if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) {
			vap->va_vaflags |= VA_UTIMES_NULL;
		}
	} else {
		exclusive = gotuid = gotgid = 0;
	}
	/* use the open file's stateid if we have one, else a local zero stateid */
	if (nofp) {
		sid = &nofp->nof_stateid;
	} else {
		stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
		sid = &stateid;
	}

	if ((error = nfs_open_owner_set_busy(noop, thd))) {
		return error;
	}
again:
	/*
	 * Retry entry point: an exclusive create rejected with NFSERR_NOTSUPP
	 * jumps back here with exclusive cleared to retry as CREATE_UNCHECKED.
	 */
	rflags = delegation = recall = 0;
	ace.ace_flags = 0;
	s = sbuf;
	slen = sizeof(sbuf);
	NVATTR_INIT(&nvattr);
	NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, SAVEFH, OPEN(CREATE?), GETATTR(FH), RESTOREFH, GETATTR
	numops = 6;
	nfsm_chain_build_alloc_init(error, &nmreq, 53 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, create ? "create" : "open", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	/* open_owner4: clientid + uid-as-opaque-owner */
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
	nfsm_chain_add_32(error, &nmreq, create);
	if (create) {
		if (exclusive) {
			static uint32_t create_verf; // XXX need a better verifier
			create_verf++;
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE);
			/* insert 64 bit verifier */
			nfsm_chain_add_32(error, &nmreq, create_verf);
			nfsm_chain_add_32(error, &nmreq, create_verf);
		} else {
			nfsm_chain_add_32(error, &nmreq, NFS_CREATE_UNCHECKED);
			nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
		}
	}
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* also request the filehandle so we can build/check the node */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	if (!error) {
		error = busyerror = nfs_node_set_busy(dnp, thd);
	}
	nfsmout_if(error);

	/* presumably the concurrent "._" sibling lookup — TODO confirm dulookup semantics */
	if (create && !namedattrs) {
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
	}

	error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req);
	if (!error) {
		if (create && !namedattrs) {
			nfs_dulookup_start(&dul, dnp, ctx);
		}
		error = nfs_request_async_finish(req, &nmrep, &xid, &status);
		/* keep the xid; nfs_nget below needs the xid that matches nvattr */
		savedxid = xid;
	}

	if (create && !namedattrs) {
		nfs_dulookup_finish(&dul, dnp, ctx);
	}

	if ((lockerror = nfs_node_lock(dnp))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, sid);
	nfsm_chain_check_change_info(error, &nmrep, dnp);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error) {
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			}
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			/* "who" string longer than the stack buffer: switch to a heap buffer */
			if (!error && (len >= slen)) {
				MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
				if (s) {
					slen = len + 1;
				} else {
					ace.ace_flags = 0;
				}
			}
			if (s) {
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			} else {
				/* no buffer: skip over the opaque who string in the reply */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			}
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
					ace.ace_flags = 0;
				}
			}
			if (error || !s) {
				ace.ace_flags = 0;
			}
			if (s && (s != sbuf)) {
				FREE(s, M_TEMP);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	}
	/* At this point if we have no error, the object was created/opened. */
	open_error = error;
	nfsmout_if(error);
	if (create && vap && !exclusive) {
		nfs_vattr_set_supported(bitmap, vap);
	}
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsmout_if(error);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open/create didn't return filehandle? %s\n", cnp->cn_nameptr);
		error = EBADRPC;
		goto nfsmout;
	}
	if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX for the open case, what if fh doesn't match the vnode we think we're opening?
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			NP(np, "nfs4_open_rpc: warning: file handle mismatch");
		}
	}
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(dnp);
	}
	nfsmout_if(error);

	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
	}

	/* server requires an OPEN_CONFIRM round trip before the open is valid */
	if (rflags & NFS_OPEN_RESULT_CONFIRM) {
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		NVATTR_CLEANUP(&nvattr);
		error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid);
		nfsmout_if(error);
		savedxid = xid;
		if ((lockerror = nfs_node_lock(dnp))) {
			error = lockerror;
		}
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	if (!lockerror && create) {
		/* a successful create invalidates any cached negative name entries */
		if (!open_error && (dnp->n_flag & NNEGNCENTRIES)) {
			dnp->n_flag &= ~NNEGNCENTRIES;
			cache_purge_negatives(dvp);
		}
		dnp->n_flag |= NMODIFIED;
		nfs_node_unlock(dnp);
		lockerror = ENOENT;
		nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	}
	if (!lockerror) {
		nfs_node_unlock(dnp);
	}
	if (!error && !np && fh.fh_len) {
		/* create the vnode with the filehandle and attributes */
		xid = savedxid;
		error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp);
		if (!error) {
			newvp = NFSTOV(newnp);
		}
	}
	NVATTR_CLEANUP(&nvattr);
	if (!busyerror) {
		nfs_node_clear_busy(dnp);
	}
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (!np) {
			np = newnp;
		}
		if (!error && np && !recall) {
			/* stuff the delegation state in the node */
			lck_mtx_lock(&np->n_openlock);
			np->n_openflags &= ~N_DELEG_MASK;
			np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
			np->n_dstateid = dstateid;
			np->n_dace = ace;
			/* double-checked insertion onto the mount's delegation list */
			if (np->n_dlink.tqe_next == NFSNOLIST) {
				lck_mtx_lock(&nmp->nm_lock);
				if (np->n_dlink.tqe_next == NFSNOLIST) {
					TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
				}
				lck_mtx_unlock(&nmp->nm_lock);
			}
			lck_mtx_unlock(&np->n_openlock);
		} else {
			/* give the delegation back */
			if (np) {
				if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
					/* update delegation state and return it */
					lck_mtx_lock(&np->n_openlock);
					np->n_openflags &= ~N_DELEG_MASK;
					np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
					np->n_dstateid = dstateid;
					np->n_dace = ace;
					if (np->n_dlink.tqe_next == NFSNOLIST) {
						lck_mtx_lock(&nmp->nm_lock);
						if (np->n_dlink.tqe_next == NFSNOLIST) {
							TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
						}
						lck_mtx_unlock(&nmp->nm_lock);
					}
					lck_mtx_unlock(&np->n_openlock);
					/* don't need to send a separate delegreturn for fh */
					fh.fh_len = 0;
				}
				/* return np's current delegation */
				nfs4_delegation_return(np, 0, thd, cred);
			}
			if (fh.fh_len) { /* return fh's delegation if it wasn't for np */
				nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
			}
		}
	}
	if (error) {
		/* server doesn't support exclusive create: retry as unchecked create */
		if (exclusive && (error == NFSERR_NOTSUPP)) {
			exclusive = 0;
			goto again;
		}
		if (newvp) {
			nfs_node_unlock(newnp);
			vnode_put(newvp);
		}
	} else if (create) {
		nfs_node_unlock(newnp);
		if (exclusive) {
			/* exclusive create ignored the attrs, so set them with a follow-up SETATTR */
			error = nfs4_setattr_rpc(newnp, vap, ctx);
			if (error && (gotuid || gotgid)) {
				/* it's possible the server didn't like our attempt to set IDs. */
				/* so, let's try it again without those */
				VATTR_CLEAR_ACTIVE(vap, va_uid);
				VATTR_CLEAR_ACTIVE(vap, va_gid);
				error = nfs4_setattr_rpc(newnp, vap, ctx);
			}
		}
		if (error) {
			vnode_put(newvp);
		} else {
			*vpp = newvp;
		}
	}
	nfs_open_owner_clear_busy(noop);
	return error;
}
5464
5465
5466 /*
5467 * Send an OPEN RPC to claim a delegated open for a file
5468 */
/*
 * Send an OPEN RPC (CLAIM_DELEGATE_CUR) to claim an open for a file that
 * the client currently holds a delegation for.
 *
 * The claim names the file via a parent directory + component name: either
 * the sillyrename dir/name if the node was sillyrenamed, or the stashed
 * n_parent / vnode parent plus the vnode's name.  On success the open
 * stateid lands in nofp->nof_stateid and any (re)granted delegation state
 * is stored in the node; if the server flagged the delegation for recall,
 * its return is queued rather than done inline (there may be more opens
 * to claim first).  Returns 0 or an errno/NFS error.
 */
int
nfs4_claim_delegated_open_rpc(
	struct nfs_open_file *nofp,
	int share_access,
	int share_deny,
	int flags)
{
	struct nfsmount *nmp;
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_vattr nvattr;
	int error = 0, lockerror = ENOENT, status;
	int nfsvers, numops;
	u_int64_t xid;
	nfsnode_t np = nofp->nof_np;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
	uint32_t rflags = 0, delegation, recall = 0;
	fhandle_t fh;
	struct nfs_stateid dstateid;
	char sbuf[64], *s = sbuf;
	uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
	struct kauth_ace ace;
	vnode_t dvp = NULL;
	const char *vname = NULL;
	const char *name = NULL;
	size_t namelen;
	char smallname[128];
	char *filename = NULL;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* figure out the parent directory and name to claim with */
	nfs_node_lock_force(np);
	if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) {
		/*
		 * The node's been sillyrenamed, so we need to use
		 * the sillyrename directory/name to do the open.
		 */
		struct nfs_sillyrename *nsp = np->n_sillyrename;
		dvp = NFSTOV(nsp->nsr_dnp);
		if ((error = vnode_get(dvp))) {
			dvp = NULLVP;
			nfs_node_unlock(np);
			goto out;
		}
		name = nsp->nsr_name;
	} else {
		/*
		 * [sigh] We can't trust VFS to get the parent right for named
		 * attribute nodes. (It likes to reparent the nodes after we've
		 * created them.) Luckily we can probably get the right parent
		 * from the n_parent we have stashed away.
		 */
		if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) &&
		    (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) {
			dvp = NULL;
		}
		if (!dvp) {
			dvp = vnode_getparent(NFSTOV(np));
		}
		vname = vnode_getname(NFSTOV(np));
		if (!dvp || !vname) {
			if (!error) {
				error = EIO;
			}
			nfs_node_unlock(np);
			goto out;
		}
		name = vname;
	}
	/* copy the name into a stable buffer; spill to the heap if it's long */
	filename = &smallname[0];
	namelen = snprintf(filename, sizeof(smallname), "%s", name);
	if (namelen >= sizeof(smallname)) {
		MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK);
		if (!filename) {
			error = ENOMEM;
			nfs_node_unlock(np);
			goto out;
		}
		snprintf(filename, namelen + 1, "%s", name);
	}
	nfs_node_unlock(np);

	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		goto out;
	}
	NVATTR_INIT(&nvattr);
	delegation = NFS_OPEN_DELEGATE_NONE;
	dstateid = np->n_dstateid;
	NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen);

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN, GETATTR(FH)
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_claim_d", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, VTONFS(dvp)->n_fhp, VTONFS(dvp)->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, share_access);
	nfsm_chain_add_32(error, &nmreq, share_deny);
	// open owner: clientid + uid
	nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
	nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
	// openflag4
	nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
	// open_claim4
	nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_DELEGATE_CUR);
	nfsm_chain_add_stateid(error, &nmreq, &np->n_dstateid);
	nfsm_chain_add_name(error, &nmreq, filename, namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	/* request the filehandle too, so we can sanity-check it against np */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);

	error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
	    noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
	nfs_owner_seqid_increment(noop, NULL, error);
	/* the open stateid goes straight into the open file */
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_check_change_info(error, &nmrep, np);
	nfsm_chain_get_32(error, &nmrep, rflags);
	bmlen = NFS_ATTR_BITMAP_LEN;
	nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
	nfsm_chain_get_32(error, &nmrep, delegation);
	if (!error) {
		switch (delegation) {
		case NFS_OPEN_DELEGATE_NONE:
			// if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */
			//      printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???");
			break;
		case NFS_OPEN_DELEGATE_READ:
		case NFS_OPEN_DELEGATE_WRITE:
			/* warn if the delegation type flipped from what we held */
			if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) &&
			    (delegation == NFS_OPEN_DELEGATE_WRITE)) ||
			    (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) &&
			    (delegation == NFS_OPEN_DELEGATE_READ))) {
				printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n",
				    ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R",
				    (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???");
			}
			nfsm_chain_get_stateid(error, &nmrep, &dstateid);
			nfsm_chain_get_32(error, &nmrep, recall);
			if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
				nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
			}
			/* if we have any trouble accepting the ACE, just invalidate it */
			ace_type = ace_flags = ace_mask = len = 0;
			nfsm_chain_get_32(error, &nmrep, ace_type);
			nfsm_chain_get_32(error, &nmrep, ace_flags);
			nfsm_chain_get_32(error, &nmrep, ace_mask);
			nfsm_chain_get_32(error, &nmrep, len);
			ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
			ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
			ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
			/* "who" string longer than the stack buffer: switch to a heap buffer */
			if (!error && (len >= slen)) {
				MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
				if (s) {
					slen = len + 1;
				} else {
					ace.ace_flags = 0;
				}
			}
			if (s) {
				nfsm_chain_get_opaque(error, &nmrep, len, s);
			} else {
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
			}
			if (!error && s) {
				s[len] = '\0';
				if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
					ace.ace_flags = 0;
				}
			}
			if (error || !s) {
				ace.ace_flags = 0;
			}
			if (s && (s != sbuf)) {
				FREE(s, M_TEMP);
			}
			if (!error) {
				/* stuff the latest delegation state in the node */
				lck_mtx_lock(&np->n_openlock);
				np->n_openflags &= ~N_DELEG_MASK;
				np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
				np->n_dstateid = dstateid;
				np->n_dace = ace;
				/* double-checked insertion onto the mount's delegation list */
				if (np->n_dlink.tqe_next == NFSNOLIST) {
					lck_mtx_lock(&nmp->nm_lock);
					if (np->n_dlink.tqe_next == NFSNOLIST) {
						TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				lck_mtx_unlock(&np->n_openlock);
			}
			break;
		default:
			error = EBADRPC;
			break;
		}
	}
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
	nfsmout_if(error);
	if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
		printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???");
		error = EBADRPC;
		goto nfsmout;
	}
	if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
		// XXX what if fh doesn't match the vnode we think we're re-opening?
		// Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
		if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
			printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???");
		}
	}
	error = nfs_loadattrcache(np, &nvattr, &xid, 1);
	nfsmout_if(error);
	if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
		nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
	}
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
		if (recall) {
			/*
			 * We're making a delegated claim.
			 * Don't return the delegation here in case we have more to claim.
			 * Just make sure it's queued up to be returned.
			 */
			nfs4_delegation_return_enqueue(np);
		}
	}
out:
	// if (!error)
	//      printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???");
	if (filename && (filename != &smallname[0])) {
		FREE(filename, M_TEMP);
	}
	if (vname) {
		vnode_putname(vname);
	}
	if (dvp != NULLVP) {
		vnode_put(dvp);
	}
	return error;
}
5746
5747 /*
5748 * Send an OPEN RPC to reclaim an open file.
5749 */
5750 int
5751 nfs4_open_reclaim_rpc(
5752 struct nfs_open_file *nofp,
5753 int share_access,
5754 int share_deny)
5755 {
5756 struct nfsmount *nmp;
5757 struct nfs_open_owner *noop = nofp->nof_owner;
5758 struct nfs_vattr nvattr;
5759 int error = 0, lockerror = ENOENT, status;
5760 int nfsvers, numops;
5761 u_int64_t xid;
5762 nfsnode_t np = nofp->nof_np;
5763 struct nfsm_chain nmreq, nmrep;
5764 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
5765 uint32_t rflags = 0, delegation, recall = 0;
5766 fhandle_t fh;
5767 struct nfs_stateid dstateid;
5768 char sbuf[64], *s = sbuf;
5769 uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf);
5770 struct kauth_ace ace;
5771 struct nfsreq_secinfo_args si;
5772
5773 nmp = NFSTONMP(np);
5774 if (nfs_mount_gone(nmp)) {
5775 return ENXIO;
5776 }
5777 nfsvers = nmp->nm_vers;
5778
5779 if ((error = nfs_open_owner_set_busy(noop, NULL))) {
5780 return error;
5781 }
5782
5783 NVATTR_INIT(&nvattr);
5784 delegation = NFS_OPEN_DELEGATE_NONE;
5785 dstateid = np->n_dstateid;
5786 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
5787
5788 nfsm_chain_null(&nmreq);
5789 nfsm_chain_null(&nmrep);
5790
5791 // PUTFH, OPEN, GETATTR(FH)
5792 numops = 3;
5793 nfsm_chain_build_alloc_init(error, &nmreq, 48 * NFSX_UNSIGNED);
5794 nfsm_chain_add_compound_header(error, &nmreq, "open_reclaim", nmp->nm_minor_vers, numops);
5795 numops--;
5796 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
5797 nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
5798 numops--;
5799 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
5800 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
5801 nfsm_chain_add_32(error, &nmreq, share_access);
5802 nfsm_chain_add_32(error, &nmreq, share_deny);
5803 // open owner: clientid + uid
5804 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid); // open_owner4.clientid
5805 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
5806 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred)); // open_owner4.owner
5807 // openflag4
5808 nfsm_chain_add_32(error, &nmreq, NFS_OPEN_NOCREATE);
5809 // open_claim4
5810 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS);
5811 delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ :
5812 (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE :
5813 NFS_OPEN_DELEGATE_NONE;
5814 nfsm_chain_add_32(error, &nmreq, delegation);
5815 delegation = NFS_OPEN_DELEGATE_NONE;
5816 numops--;
5817 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
5818 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
5819 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
5820 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
5821 nfsm_chain_build_done(error, &nmreq);
5822 nfsm_assert(error, (numops == 0), EPROTO);
5823 nfsmout_if(error);
5824
5825 error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(),
5826 noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status);
5827
5828 if ((lockerror = nfs_node_lock(np))) {
5829 error = lockerror;
5830 }
5831 nfsm_chain_skip_tag(error, &nmrep);
5832 nfsm_chain_get_32(error, &nmrep, numops);
5833 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
5834 nfsmout_if(error);
5835 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
5836 nfs_owner_seqid_increment(noop, NULL, error);
5837 nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
5838 nfsm_chain_check_change_info(error, &nmrep, np);
5839 nfsm_chain_get_32(error, &nmrep, rflags);
5840 bmlen = NFS_ATTR_BITMAP_LEN;
5841 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
5842 nfsm_chain_get_32(error, &nmrep, delegation);
5843 if (!error) {
5844 switch (delegation) {
5845 case NFS_OPEN_DELEGATE_NONE:
5846 if (np->n_openflags & N_DELEG_MASK) {
5847 /*
5848 * Hey! We were supposed to get our delegation back even
5849 * if it was getting immediately recalled. Bad server!
5850 *
5851 * Just try to return the existing delegation.
5852 */
5853 // NP(np, "nfs: open reclaim didn't return delegation?");
5854 delegation = (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : NFS_OPEN_DELEGATE_READ;
5855 recall = 1;
5856 }
5857 break;
5858 case NFS_OPEN_DELEGATE_READ:
5859 case NFS_OPEN_DELEGATE_WRITE:
5860 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
5861 nfsm_chain_get_32(error, &nmrep, recall);
5862 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
5863 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
5864 }
5865 /* if we have any trouble accepting the ACE, just invalidate it */
5866 ace_type = ace_flags = ace_mask = len = 0;
5867 nfsm_chain_get_32(error, &nmrep, ace_type);
5868 nfsm_chain_get_32(error, &nmrep, ace_flags);
5869 nfsm_chain_get_32(error, &nmrep, ace_mask);
5870 nfsm_chain_get_32(error, &nmrep, len);
5871 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
5872 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
5873 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
5874 if (!error && (len >= slen)) {
5875 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
5876 if (s) {
5877 slen = len + 1;
5878 } else {
5879 ace.ace_flags = 0;
5880 }
5881 }
5882 if (s) {
5883 nfsm_chain_get_opaque(error, &nmrep, len, s);
5884 } else {
5885 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
5886 }
5887 if (!error && s) {
5888 s[len] = '\0';
5889 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
5890 ace.ace_flags = 0;
5891 }
5892 }
5893 if (error || !s) {
5894 ace.ace_flags = 0;
5895 }
5896 if (s && (s != sbuf)) {
5897 FREE(s, M_TEMP);
5898 }
5899 if (!error) {
5900 /* stuff the delegation state in the node */
5901 lck_mtx_lock(&np->n_openlock);
5902 np->n_openflags &= ~N_DELEG_MASK;
5903 np->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
5904 np->n_dstateid = dstateid;
5905 np->n_dace = ace;
5906 if (np->n_dlink.tqe_next == NFSNOLIST) {
5907 lck_mtx_lock(&nmp->nm_lock);
5908 if (np->n_dlink.tqe_next == NFSNOLIST) {
5909 TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink);
5910 }
5911 lck_mtx_unlock(&nmp->nm_lock);
5912 }
5913 lck_mtx_unlock(&np->n_openlock);
5914 }
5915 break;
5916 default:
5917 error = EBADRPC;
5918 break;
5919 }
5920 }
5921 nfsmout_if(error);
5922 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
5923 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
5924 nfsmout_if(error);
5925 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
5926 NP(np, "nfs: open reclaim didn't return filehandle?");
5927 error = EBADRPC;
5928 goto nfsmout;
5929 }
5930 if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) {
5931 // XXX what if fh doesn't match the vnode we think we're re-opening?
5932 // That should be pretty hard in this case, given that we are doing
5933 // the open reclaim using the file handle (and not a dir/name pair).
5934 // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes.
5935 if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
5936 NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch");
5937 }
5938 }
5939 error = nfs_loadattrcache(np, &nvattr, &xid, 1);
5940 nfsmout_if(error);
5941 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
5942 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
5943 }
5944 nfsmout:
5945 // if (!error)
5946 // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny);
5947 NVATTR_CLEANUP(&nvattr);
5948 nfsm_chain_cleanup(&nmreq);
5949 nfsm_chain_cleanup(&nmrep);
5950 if (!lockerror) {
5951 nfs_node_unlock(np);
5952 }
5953 nfs_open_owner_clear_busy(noop);
5954 if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) {
5955 if (recall) {
5956 nfs4_delegation_return_enqueue(np);
5957 }
5958 }
5959 return error;
5960 }
5961
/*
 * Send an NFSv4 OPEN_DOWNGRADE RPC for the given open file.
 *
 * Issues a PUTFH+OPEN_DOWNGRADE+GETATTR compound that reduces the open's
 * share access/deny modes to the current values in nofp->nof_access and
 * nofp->nof_deny.  The open owner is marked busy for the duration (its
 * seqid is consumed by the request), the owner's seqid is advanced based
 * on the reply, and the open stateid is refreshed from the reply.
 *
 * Returns 0 on success or an errno/NFS error.
 */
int
nfs4_open_downgrade_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	vfs_context_t ctx)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize with other users of this open owner's seqid */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, OPEN_DOWNGRADE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "open_downgrd", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN_DOWNGRADE);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_access);
	nfsm_chain_add_32(error, &nmreq, nofp->nof_deny);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_NOINTR: don't let a signal abandon a state-changing operation */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    &si, R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN_DOWNGRADE);
	/* advance the open owner's seqid as appropriate for this result */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6033
/*
 * Send an NFSv4 CLOSE RPC for the given open file.
 *
 * Issues a PUTFH+CLOSE+GETATTR compound using the open owner's seqid and
 * the open file's current stateid.  The open owner is marked busy for the
 * duration; on reply the owner's seqid is advanced and the (now closed)
 * stateid is refreshed from the server's response.
 *
 * `flags` is OR'd into the request flags (R_NOINTR is always added so the
 * close can't be cut short by a signal); R_RECOVER is passed by recovery
 * paths such as nfs_release_open_state_for_node().
 */
int
nfs4_close_rpc(
	nfsnode_t np,
	struct nfs_open_file *nofp,
	thread_t thd,
	kauth_cred_t cred,
	int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfsmount *nmp;
	int error, lockerror = ENOENT, status, nfsvers, numops;
	struct nfsm_chain nmreq, nmrep;
	u_int64_t xid;
	struct nfsreq_secinfo_args si;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* serialize with other users of this open owner's seqid */
	if ((error = nfs_open_owner_set_busy(noop, NULL))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, CLOSE, GETATTR
	numops = 3;
	nfsm_chain_build_alloc_init(error, &nmreq, 23 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "close", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_CLOSE);
	nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
	nfsm_chain_add_stateid(error, &nmreq, &nofp->nof_stateid);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsmout_if(error);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_CLOSE);
	/* advance the open owner's seqid as appropriate for this result */
	nfs_owner_seqid_increment(noop, NULL, error);
	nfsm_chain_get_stateid(error, &nmrep, &nofp->nof_stateid);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
nfsmout:
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfs_open_owner_clear_busy(noop);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6103
6104
/*
 * Claim the delegated open combinations this open file holds.
 *
 * Each access/deny combination that was opened locally under a delegation
 * is tracked in a nof_d_* counter.  For each non-zero counter this sends a
 * claim-delegated open RPC to re-establish that open with the server, then
 * (under the open file lock) folds the delegated count into the matching
 * confirmed (non-delegated) counter.
 *
 * The deny-mode combinations are always claimed via RPC.  For the
 * deny-none combinations, certain "delegation lost" errors instead allow
 * the file to simply be flagged for reopen on next access — but only if no
 * file locks are held.  After the opens, any delegated file locks held by
 * this open file's owner are reclaimed with setlock RPCs.
 *
 * Returns 0 if all state was claimed; otherwise an error, after arranging
 * for delegation return/recovery and (if the state is unrecoverable)
 * revoking all open state for the node.
 */
int
nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags)
{
	struct nfs_open_owner *noop = nofp->nof_owner;
	struct nfs_lock_owner *nlop;
	struct nfs_file_lock *nflp, *nextnflp;
	struct nfsmount *nmp;
	int error = 0, reopen = 0;

	/* claim each delegated access/deny-mode combination, most restrictive first */
	if (nofp->nof_d_rw_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_drw += nofp->nof_d_rw_drw;
			nofp->nof_d_rw_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_drw += nofp->nof_d_w_drw;
			nofp->nof_d_w_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_drw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_drw += nofp->nof_d_r_drw;
			nofp->nof_d_r_drw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_rw_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw_dw += nofp->nof_d_rw_dw;
			nofp->nof_d_rw_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_w_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w_dw += nofp->nof_d_w_dw;
			nofp->nof_d_w_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if (!error && nofp->nof_d_r_dw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE, flags);
		if (!error) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r_dw += nofp->nof_d_r_dw;
			nofp->nof_d_r_dw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* non-deny-mode opens may be reopened if no locks are held */
	if (!error && nofp->nof_d_rw) {
		error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags);
		/* for some errors, we should just try reopening the file */
		if (nfs_mount_state_error_delegation_lost(error)) {
			reopen = error;
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_rw += nofp->nof_d_rw;
			nofp->nof_d_rw = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	/* if we've already set reopen, we should move these other two opens from delegated to not delegated */
	if ((!error || reopen) && nofp->nof_d_w) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_w += nofp->nof_d_w;
			nofp->nof_d_w = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}
	if ((!error || reopen) && nofp->nof_d_r) {
		if (!error) {
			error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags);
			/* for some errors, we should just try reopening the file */
			if (nfs_mount_state_error_delegation_lost(error)) {
				reopen = error;
			}
		}
		if (!error || reopen) {
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_r += nofp->nof_d_r;
			nofp->nof_d_r = 0;
			lck_mtx_unlock(&nofp->nof_lock);
		}
	}

	if (reopen) {
		/*
		 * Any problems with the delegation probably indicates that we
		 * should review/return all of our current delegation state.
		 */
		if ((nmp = NFSTONMP(nofp->nof_np))) {
			nfs4_delegation_return_enqueue(nofp->nof_np);
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, NFSERR_EXPIRED);
			lck_mtx_unlock(&nmp->nm_lock);
		}
		/* reopen is only safe if there are no file locks to reclaim */
		if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) {
			/* just reopen the file on next access */
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d",
			    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
			lck_mtx_lock(&nofp->nof_lock);
			nofp->nof_flags |= NFS_OPEN_FILE_REOPEN;
			lck_mtx_unlock(&nofp->nof_lock);
			return 0;
		}
		if (reopen) {
			NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d",
			    reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		}
	}

	if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) {
		/* claim delegated locks */
		TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) {
			/* only this open file's owner's locks are ours to claim */
			if (nlop->nlo_open_owner != noop) {
				continue;
			}
			TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) {
				/* skip dead & blocked lock requests (shouldn't be any in the held lock list) */
				if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
					continue;
				}
				/* skip non-delegated locks */
				if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
					continue;
				}
				error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred);
				if (error) {
					NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d",
					    nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
					break;
				}
				// else {
				//      NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d",
				//              nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred));
				// }
			}
			if (error) {
				break;
			}
		}
	}

	if (!error) { /* all state claimed successfully! */
		return 0;
	}

	/* restart if it looks like a problem more than just losing the delegation */
	if (!nfs_mount_state_error_delegation_lost(error) &&
	    ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) {
		NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));
		if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) {
			nfs_need_reconnect(nmp);
		}
		return error;
	}

	/* delegated state lost (once held but now not claimable) */
	NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred));

	/*
	 * Any problems with the delegation probably indicates that we
	 * should review/return all of our current delegation state.
	 */
	if ((nmp = NFSTONMP(nofp->nof_np))) {
		nfs4_delegation_return_enqueue(nofp->nof_np);
		lck_mtx_lock(&nmp->nm_lock);
		nfs_need_recover(nmp, NFSERR_EXPIRED);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	/* revoke all open file state */
	nfs_revoke_open_state_for_node(nofp->nof_np);

	return error;
}
#endif /* CONFIG_NFS4 */
6309
/*
 * Release all open state for the given node.
 *
 * Drops every held file lock (sending an unlock RPC for non-delegated
 * locks unless `force` is set or the mount is gone) and marks every open
 * file on the node as LOST.  For NFSv4, a best-effort CLOSE RPC (with
 * R_RECOVER) is also sent for each open unless `force` is set.
 *
 * `force` means "don't talk to the server": just tear down local state.
 */
void
nfs_release_open_state_for_node(nfsnode_t np, int force)
{
	struct nfsmount *nmp = NFSTONMP(np);
	struct nfs_open_file *nofp;
	struct nfs_file_lock *nflp, *nextnflp;

	/*
	 * drop held locks
	 *
	 * NOTE(review): the n_locks list is walked without n_openlock held
	 * (the lock is only taken per-entry for the removal); presumably
	 * callers serialize access to this node's open state — confirm.
	 */
	TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) {
		/* skip dead & blocked lock requests */
		if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) {
			continue;
		}
		/* send an unlock if not a delegated lock */
		if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) {
			nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER,
			    NULL, nflp->nfl_owner->nlo_open_owner->noo_cred);
		}
		/* kill/remove the lock */
		lck_mtx_lock(&np->n_openlock);
		nflp->nfl_flags |= NFS_FILE_LOCK_DEAD;
		lck_mtx_lock(&nflp->nfl_owner->nlo_lock);
		TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink);
		lck_mtx_unlock(&nflp->nfl_owner->nlo_lock);
		if (nflp->nfl_blockcnt) {
			/* wake up anyone blocked on this lock */
			wakeup(nflp);
		} else {
			/* remove nflp from lock list and destroy */
			TAILQ_REMOVE(&np->n_locks, nflp, nfl_link);
			nfs_file_lock_destroy(nflp);
		}
		lck_mtx_unlock(&np->n_openlock);
	}

	lck_mtx_lock(&np->n_openlock);

	/* drop all opens */
	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		if (nofp->nof_flags & NFS_OPEN_FILE_LOST) {
			continue;
		}
		/* mark open state as lost */
		lck_mtx_lock(&nofp->nof_lock);
		nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN;
		nofp->nof_flags |= NFS_OPEN_FILE_LOST;

		lck_mtx_unlock(&nofp->nof_lock);
#if CONFIG_NFS4
		/* best-effort CLOSE; errors are deliberately ignored here */
		if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) {
			nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER);
		}
#endif
	}

	lck_mtx_unlock(&np->n_openlock);
}
6370
6371 /*
6372 * State for a node has been lost, drop it, and revoke the node.
6373 * Attempt to return any state if possible in case the server
6374 * might somehow think we hold it.
6375 */
6376 void
6377 nfs_revoke_open_state_for_node(nfsnode_t np)
6378 {
6379 struct nfsmount *nmp;
6380
6381 /* mark node as needing to be revoked */
6382 nfs_node_lock_force(np);
6383 if (np->n_flag & NREVOKE) { /* already revoked? */
6384 NP(np, "nfs_revoke_open_state_for_node(): already revoked");
6385 nfs_node_unlock(np);
6386 return;
6387 }
6388 np->n_flag |= NREVOKE;
6389 nfs_node_unlock(np);
6390
6391 nfs_release_open_state_for_node(np, 0);
6392 NP(np, "nfs: state lost for %p 0x%x", np, np->n_flag);
6393
6394 /* mark mount as needing a revoke scan and have the socket thread do it. */
6395 if ((nmp = NFSTONMP(np))) {
6396 lck_mtx_lock(&nmp->nm_lock);
6397 nmp->nm_state |= NFSSTA_REVOKE;
6398 nfs_mount_sock_thread_wake(nmp);
6399 lck_mtx_unlock(&nmp->nm_lock);
6400 }
6401 }
6402
6403 #if CONFIG_NFS4
6404 /*
6405 * Claim the delegated open combinations that each of this node's open files hold.
6406 */
6407 int
6408 nfs4_claim_delegated_state_for_node(nfsnode_t np, int flags)
6409 {
6410 struct nfs_open_file *nofp;
6411 int error = 0;
6412
6413 lck_mtx_lock(&np->n_openlock);
6414
6415 /* walk the open file list looking for opens with delegated state to claim */
6416 restart:
6417 TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
6418 if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw &&
6419 !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw &&
6420 !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) {
6421 continue;
6422 }
6423 lck_mtx_unlock(&np->n_openlock);
6424 error = nfs4_claim_delegated_state_for_open_file(nofp, flags);
6425 lck_mtx_lock(&np->n_openlock);
6426 if (error) {
6427 break;
6428 }
6429 goto restart;
6430 }
6431
6432 lck_mtx_unlock(&np->n_openlock);
6433
6434 return error;
6435 }
6436
6437 /*
6438 * Mark a node as needed to have its delegation returned.
6439 * Queue it up on the delegation return queue.
6440 * Make sure the thread is running.
6441 */
6442 void
6443 nfs4_delegation_return_enqueue(nfsnode_t np)
6444 {
6445 struct nfsmount *nmp;
6446
6447 nmp = NFSTONMP(np);
6448 if (nfs_mount_gone(nmp)) {
6449 return;
6450 }
6451
6452 lck_mtx_lock(&np->n_openlock);
6453 np->n_openflags |= N_DELEG_RETURN;
6454 lck_mtx_unlock(&np->n_openlock);
6455
6456 lck_mtx_lock(&nmp->nm_lock);
6457 if (np->n_dreturn.tqe_next == NFSNOLIST) {
6458 TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn);
6459 }
6460 nfs_mount_sock_thread_wake(nmp);
6461 lck_mtx_unlock(&nmp->nm_lock);
6462 }
6463
/*
 * return any delegation we may have for the given node
 *
 * Marks the node as returning, busies its open state, claims any opens
 * and locks held under the delegation, then sends DELEGRETURN.  On most
 * errors the delegation is assumed gone and the node is removed from the
 * mount's delegation list; on ETIMEDOUT/NFSERR_*MOVED the state is kept
 * so the return can be retried.  Always clears the return flags and
 * dequeues the node from the return queue before exiting.
 */
int
nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred)
{
	struct nfsmount *nmp;
	fhandle_t fh;
	nfs_stateid dstateid;
	int error;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/* first, make sure the node's marked for delegation return */
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	/* make sure nobody else is using the delegation state */
	if ((error = nfs_open_state_set_busy(np, NULL))) {
		goto out;
	}

	/* claim any delegated state */
	if ((error = nfs4_claim_delegated_state_for_node(np, flags))) {
		goto out;
	}

	/* return the delegation */
	/* snapshot the stateid and filehandle under the open lock */
	lck_mtx_lock(&np->n_openlock);
	dstateid = np->n_dstateid;
	fh.fh_len = np->n_fhsize;
	bcopy(np->n_fhp, &fh.fh_data, fh.fh_len);
	lck_mtx_unlock(&np->n_openlock);
	error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred);
	/* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */
	if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) {
		lck_mtx_lock(&np->n_openlock);
		np->n_openflags &= ~N_DELEG_MASK;
		/* lock order: n_openlock before nm_lock (matches the claim-success path above) */
		lck_mtx_lock(&nmp->nm_lock);
		if (np->n_dlink.tqe_next != NFSNOLIST) {
			TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
			np->n_dlink.tqe_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		lck_mtx_unlock(&np->n_openlock);
	}

out:
	/* make sure it's no longer on the return queue and clear the return flags */
	lck_mtx_lock(&nmp->nm_lock);
	if (np->n_dreturn.tqe_next != NFSNOLIST) {
		TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
		np->n_dreturn.tqe_next = NFSNOLIST;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	lck_mtx_lock(&np->n_openlock);
	np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING);
	lck_mtx_unlock(&np->n_openlock);

	if (error) {
		NP(np, "nfs4_delegation_return, error %d", error);
		if (error == ETIMEDOUT) {
			nfs_need_reconnect(nmp);
		}
		if (nfs_mount_state_error_should_restart(error)) {
			/* make sure recovery happens */
			lck_mtx_lock(&nmp->nm_lock);
			nfs_need_recover(nmp, nfs_mount_state_error_delegation_lost(error) ? NFSERR_EXPIRED : 0);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}

	/* NOTE(review): reached even when nfs_open_state_set_busy() failed above — confirm clear-busy is safe in that case */
	nfs_open_state_clear_busy(np);

	return error;
}
6544
/*
 * RPC to return a delegation for a file handle
 *
 * Sends a PUTFH+DELEGRETURN compound for the given filehandle and
 * delegation stateid.  `flags` is passed straight through to
 * nfs_request2() (e.g. R_RECOVER).  Returns 0 or an errno/NFS error.
 */
int
nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_stateid *sid, int flags, thread_t thd, kauth_cred_t cred)
{
	int error = 0, status, numops;
	uint64_t xid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	NFSREQ_SECINFO_SET(&si, NULL, fhp, fhlen, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH, DELEGRETURN
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 16 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "delegreturn", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, fhp, fhlen);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_DELEGRETURN);
	nfsm_chain_add_stateid(error, &nmreq, sid);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* no nfsnode is passed; the request is issued against the mount */
	error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_DELEGRETURN);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
6583 #endif /* CONFIG_NFS4 */
6584
/*
 * NFS read call.
 * Just call nfs_bioread() to do the work.
 *
 * Note: the exec code paths have a tendency to call VNOP_READ (and VNOP_MMAP)
 * without first calling VNOP_OPEN, so we make sure the file is open here.
 *
 * Fast path: if this open owner already has read access on the file, go
 * straight to nfs_bioread() without busying the mount/open file.  Slow
 * path: busy everything, and if read access is still missing, add an open
 * (implicit for v2/v3, an OPEN RPC for v4) flagged NEEDCLOSE so it gets
 * closed later.
 */
int
nfs_vnop_read(
	struct vnop_read_args /* {
                               *  struct vnodeop_desc *a_desc;
                               *  vnode_t a_vp;
                               *  struct uio *a_uio;
                               *  int a_ioflag;
                               *  vfs_context_t a_context;
                               *  } */*ap)
{
	vnode_t vp = ap->a_vp;
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np;
	struct nfsmount *nmp;
	struct nfs_open_owner *noop;
	struct nfs_open_file *nofp;
	int error;

	/* reads only make sense on regular files */
	if (vnode_vtype(ap->a_vp) != VREG) {
		return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM;
	}

	np = VTONFS(vp);
	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	if (np->n_flag & NREVOKE) {
		return EIO;
	}

	/* find (or create) the open owner for this cred; takes a reference */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}
restart:
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1);
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		NP(np, "nfs_vnop_read: LOST %d", kauth_cred_getuid(noop->noo_cred));
		error = EIO;
	}
#if CONFIG_NFS4
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* the open needs to be re-established first */
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error) {
			goto restart;
		}
	}
#endif
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * Since the read path is a hot path, if we already have
	 * read access, lets go and try and do the read, without
	 * busying the mount and open file node for this open owner.
	 *
	 * N.B. This is inherently racy w.r.t. an execve using
	 * an already open file, in that the read at the end of
	 * this routine will be racing with a potential close.
	 * The code below ultimately has the same problem. In practice
	 * this does not seem to be an issue.
	 */
	if (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) {
		nfs_open_owner_rele(noop);
		goto do_read;
	}
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}
	/*
	 * If we don't have a file already open with the access we need (read) then
	 * we need to open one. Otherwise we just co-opt an open. We might not already
	 * have access because we're trying to read the first page of the
	 * file for execve.
	 */
	error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx));
	if (error) {
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return error;
	}
	if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) {
		/* we don't have the file open, so open it for read access if we're not denied */
		if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) {
			NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld",
			    nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx)));
		}
		if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) {
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return EPERM;
		}
		/* re-check revocation now that we hold the open file busy */
		if (np->n_flag & NREVOKE) {
			error = EIO;
			nfs_open_file_clear_busy(nofp);
			nfs_mount_state_in_use_end(nmp, 0);
			nfs_open_owner_rele(noop);
			return error;
		}
		if (nmp->nm_vers < NFS_VER4) {
			/* NFS v2/v3 opens are always allowed - so just add it. */
			nfs_open_file_add_open(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, 0);
		}
#if CONFIG_NFS4
		else {
			error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx);
		}
#endif
		if (!error) {
			/* this open wasn't requested by the user; close it when done */
			nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE;
		}
	}
	if (nofp) {
		nfs_open_file_clear_busy(nofp);
	}
	/* a non-zero return here means recovery ran; redo the whole lookup */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = NULL;
		goto restart;
	}
	nfs_open_owner_rele(noop);
	if (error) {
		return error;
	}
do_read:
	return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context);
}
6724
6725 #if CONFIG_NFS4
/*
 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
 * Files are created using the NFSv4 OPEN RPC. So we must open the
 * file to create it and then close it.
 *
 * Creates the file via an OPEN RPC using a provisional, nodeless open
 * file structure (opened RW/deny-none, flagged NFS_OPEN_FILE_CREATE).
 * Once a node exists, the provisional open is attached to it — or, if an
 * open file already exists for this owner, merged into it.  Also contains
 * a workaround for a retransmitted exclusive-less create that succeeded
 * server-side but whose mode denies the retried open write access.
 */
int
nfs4_vnop_create(
	struct vnop_create_args /* {
                                 *  struct vnodeop_desc *a_desc;
                                 *  vnode_t a_dvp;
                                 *  vnode_t *a_vpp;
                                 *  struct componentname *a_cnp;
                                 *  struct vnode_attr *a_vap;
                                 *  vfs_context_t a_context;
                                 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	struct componentname *cnp = ap->a_cnp;
	struct vnode_attr *vap = ap->a_vap;
	vnode_t dvp = ap->a_dvp;
	vnode_t *vpp = ap->a_vpp;
	struct nfsmount *nmp;
	nfsnode_t np;
	int error = 0, busyerror = 0, accessMode, denyMode;
	struct nfs_open_owner *noop = NULL;
	struct nfs_open_file *newnofp = NULL, *nofp = NULL;

	nmp = VTONMP(dvp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (vap) {
		nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx);
	}

	/* find (or create) the open owner for this cred; takes a reference */
	noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
	if (!noop) {
		return ENOMEM;
	}

restart:
	error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
	if (error) {
		nfs_open_owner_rele(noop);
		return error;
	}

	/* grab a provisional, nodeless open file */
	error = nfs_open_file_find(NULL, noop, &newnofp, 0, 0, 1);
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
		printf("nfs_vnop_create: LOST\n");
		error = EIO;
	}
	if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		/* This shouldn't happen given that this is a new, nodeless nofp */
		nfs_mount_state_in_use_end(nmp, 0);
		error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		if (!error) {
			goto restart;
		}
	}
	if (!error) {
		error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
	}
	if (error) {
		if (newnofp) {
			nfs_open_file_destroy(newnofp);
		}
		newnofp = NULL;
		goto out;
	}

	/*
	 * We're just trying to create the file.
	 * We'll create/open it RW, and set NFS_OPEN_FILE_CREATE.
	 */
	accessMode = NFS_OPEN_SHARE_ACCESS_BOTH;
	denyMode = NFS_OPEN_SHARE_DENY_NONE;

	/* Do the open/create */
	error = nfs4_open_rpc(newnofp, ctx, cnp, vap, dvp, vpp, NFS_OPEN_CREATE, accessMode, denyMode);
	if ((error == EACCES) && vap && !(vap->va_vaflags & VA_EXCLUSIVE) &&
	    VATTR_IS_ACTIVE(vap, va_mode) && !(vap->va_mode & S_IWUSR)) {
		/*
		 * Hmm... it looks like we may have a situation where the request was
		 * retransmitted because we didn't get the first response which successfully
		 * created/opened the file and then the second time we were denied the open
		 * because the mode the file was created with doesn't allow write access.
		 *
		 * We'll try to work around this by temporarily updating the mode and
		 * retrying the open.
		 */
		struct vnode_attr vattr;

		/* first make sure it's there */
		int error2 = nfs_lookitup(VTONFS(dvp), cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
		if (!error2 && np) {
			nfs_node_unlock(np);
			*vpp = NFSTOV(np);
			if (vnode_vtype(NFSTOV(np)) == VREG) {
				/* temporarily add S_IWUSR, retry the open, then restore the requested mode */
				VATTR_INIT(&vattr);
				VATTR_SET(&vattr, va_mode, (vap->va_mode | S_IWUSR));
				if (!nfs4_setattr_rpc(np, &vattr, ctx)) {
					error2 = nfs4_open_rpc(newnofp, ctx, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, accessMode, denyMode);
					VATTR_INIT(&vattr);
					VATTR_SET(&vattr, va_mode, vap->va_mode);
					nfs4_setattr_rpc(np, &vattr, ctx);
					if (!error2) {
						error = 0;
					}
				}
			}
			if (error) {
				/* the workaround didn't pan out; drop the node we looked up */
				vnode_put(*vpp);
				*vpp = NULL;
			}
		}
	}
	if (!error && !*vpp) {
		printf("nfs4_open_rpc returned without a node?\n");
		/* Hmmm... with no node, we have no filehandle and can't close it */
		error = EIO;
	}
	if (error) {
		/* need to cleanup our temporary nofp */
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
		newnofp = NULL;
		goto out;
	}
	/* After we have a node, add our open file struct to the node */
	np = VTONFS(*vpp);
	nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
	nofp = newnofp;
	error = nfs_open_file_find_internal(np, noop, &nofp, 0, 0, 0);
	if (error) {
		/* This shouldn't happen, because we passed in a new nofp to use. */
		printf("nfs_open_file_find_internal failed! %d\n", error);
		goto out;
	} else if (nofp != newnofp) {
		/*
		 * Hmm... an open file struct already exists.
		 * Mark the existing one busy and merge our open into it.
		 * Then destroy the one we created.
		 * Note: there's no chance of an open conflict because the
		 * open has already been granted.
		 */
		busyerror = nfs_open_file_set_busy(nofp, NULL);
		nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
		nofp->nof_stateid = newnofp->nof_stateid;
		if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
			nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
		}
		nfs_open_file_clear_busy(newnofp);
		nfs_open_file_destroy(newnofp);
	}
	newnofp = NULL;
	/* mark the node as holding a create-initiated open */
	nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
	nofp->nof_creator = current_thread();
out:
	if (nofp && !busyerror) {
		nfs_open_file_clear_busy(nofp);
	}
	/* a non-zero return here means recovery ran; redo everything */
	if (nfs_mount_state_in_use_end(nmp, error)) {
		nofp = newnofp = NULL;
		busyerror = 0;
		goto restart;
	}
	if (noop) {
		nfs_open_owner_rele(noop);
	}
	return error;
}
6903
6904 /*
6905 * Note: the NFSv4 CREATE RPC is for everything EXCEPT regular files.
6906 */
6907 int
6908 nfs4_create_rpc(
6909 vfs_context_t ctx,
6910 nfsnode_t dnp,
6911 struct componentname *cnp,
6912 struct vnode_attr *vap,
6913 int type,
6914 char *link,
6915 nfsnode_t *npp)
6916 {
6917 struct nfsmount *nmp;
6918 struct nfs_vattr nvattr;
6919 int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status;
6920 int nfsvers, namedattrs, numops;
6921 u_int64_t xid, savedxid = 0;
6922 nfsnode_t np = NULL;
6923 vnode_t newvp = NULL;
6924 struct nfsm_chain nmreq, nmrep;
6925 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
6926 const char *tag;
6927 nfs_specdata sd;
6928 fhandle_t fh;
6929 struct nfsreq rq, *req = &rq;
6930 struct nfs_dulookup dul;
6931 struct nfsreq_secinfo_args si;
6932
6933 nmp = NFSTONMP(dnp);
6934 if (nfs_mount_gone(nmp)) {
6935 return ENXIO;
6936 }
6937 nfsvers = nmp->nm_vers;
6938 namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);
6939 if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
6940 return EINVAL;
6941 }
6942
6943 sd.specdata1 = sd.specdata2 = 0;
6944
6945 switch (type) {
6946 case NFLNK:
6947 tag = "symlink";
6948 break;
6949 case NFBLK:
6950 case NFCHR:
6951 tag = "mknod";
6952 if (!VATTR_IS_ACTIVE(vap, va_rdev)) {
6953 return EINVAL;
6954 }
6955 sd.specdata1 = major(vap->va_rdev);
6956 sd.specdata2 = minor(vap->va_rdev);
6957 break;
6958 case NFSOCK:
6959 case NFFIFO:
6960 tag = "mknod";
6961 break;
6962 case NFDIR:
6963 tag = "mkdir";
6964 break;
6965 default:
6966 return EINVAL;
6967 }
6968
6969 nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx);
6970
6971 error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx));
6972 if (!namedattrs) {
6973 nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
6974 }
6975
6976 NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0);
6977 NVATTR_INIT(&nvattr);
6978 nfsm_chain_null(&nmreq);
6979 nfsm_chain_null(&nmrep);
6980
6981 // PUTFH, SAVEFH, CREATE, GETATTR(FH), RESTOREFH, GETATTR
6982 numops = 6;
6983 nfsm_chain_build_alloc_init(error, &nmreq, 66 * NFSX_UNSIGNED);
6984 nfsm_chain_add_compound_header(error, &nmreq, tag, nmp->nm_minor_vers, numops);
6985 numops--;
6986 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
6987 nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize);
6988 numops--;
6989 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
6990 numops--;
6991 nfsm_chain_add_32(error, &nmreq, NFS_OP_CREATE);
6992 nfsm_chain_add_32(error, &nmreq, type);
6993 if (type == NFLNK) {
6994 nfsm_chain_add_name(error, &nmreq, link, strlen(link), nmp);
6995 } else if ((type == NFBLK) || (type == NFCHR)) {
6996 nfsm_chain_add_32(error, &nmreq, sd.specdata1);
6997 nfsm_chain_add_32(error, &nmreq, sd.specdata2);
6998 }
6999 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7000 nfsm_chain_add_fattr4(error, &nmreq, vap, nmp);
7001 numops--;
7002 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7003 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7004 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7005 nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
7006 numops--;
7007 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7008 numops--;
7009 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7010 nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp);
7011 nfsm_chain_build_done(error, &nmreq);
7012 nfsm_assert(error, (numops == 0), EPROTO);
7013 nfsmout_if(error);
7014
7015 error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND,
7016 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7017 if (!error) {
7018 if (!namedattrs) {
7019 nfs_dulookup_start(&dul, dnp, ctx);
7020 }
7021 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7022 }
7023
7024 if ((lockerror = nfs_node_lock(dnp))) {
7025 error = lockerror;
7026 }
7027 nfsm_chain_skip_tag(error, &nmrep);
7028 nfsm_chain_get_32(error, &nmrep, numops);
7029 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7030 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
7031 nfsmout_if(error);
7032 nfsm_chain_op_check(error, &nmrep, NFS_OP_CREATE);
7033 nfsm_chain_check_change_info(error, &nmrep, dnp);
7034 bmlen = NFS_ATTR_BITMAP_LEN;
7035 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7036 /* At this point if we have no error, the object was created. */
7037 /* if we don't get attributes, then we should lookitup. */
7038 create_error = error;
7039 nfsmout_if(error);
7040 nfs_vattr_set_supported(bitmap, vap);
7041 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7042 nfsmout_if(error);
7043 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7044 nfsmout_if(error);
7045 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
7046 printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr);
7047 error = EBADRPC;
7048 goto nfsmout;
7049 }
7050 /* directory attributes: if we don't get them, make sure to invalidate */
7051 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
7052 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7053 savedxid = xid;
7054 nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid);
7055 if (error) {
7056 NATTRINVALIDATE(dnp);
7057 }
7058
7059 nfsmout:
7060 nfsm_chain_cleanup(&nmreq);
7061 nfsm_chain_cleanup(&nmrep);
7062
7063 if (!lockerror) {
7064 if (!create_error && (dnp->n_flag & NNEGNCENTRIES)) {
7065 dnp->n_flag &= ~NNEGNCENTRIES;
7066 cache_purge_negatives(NFSTOV(dnp));
7067 }
7068 dnp->n_flag |= NMODIFIED;
7069 nfs_node_unlock(dnp);
7070 /* nfs_getattr() will check changed and purge caches */
7071 nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
7072 }
7073
7074 if (!error && fh.fh_len) {
7075 /* create the vnode with the filehandle and attributes */
7076 xid = savedxid;
7077 error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np);
7078 if (!error) {
7079 newvp = NFSTOV(np);
7080 }
7081 }
7082 NVATTR_CLEANUP(&nvattr);
7083
7084 if (!namedattrs) {
7085 nfs_dulookup_finish(&dul, dnp, ctx);
7086 }
7087
7088 /*
7089 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
7090 * if we can succeed in looking up the object.
7091 */
7092 if ((create_error == EEXIST) || (!create_error && !newvp)) {
7093 error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np);
7094 if (!error) {
7095 newvp = NFSTOV(np);
7096 if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) {
7097 error = EEXIST;
7098 }
7099 }
7100 }
7101 if (!busyerror) {
7102 nfs_node_clear_busy(dnp);
7103 }
7104 if (error) {
7105 if (newvp) {
7106 nfs_node_unlock(np);
7107 vnode_put(newvp);
7108 }
7109 } else {
7110 nfs_node_unlock(np);
7111 *npp = np;
7112 }
7113 return error;
7114 }
7115
7116 int
7117 nfs4_vnop_mknod(
7118 struct vnop_mknod_args /* {
7119 * struct vnodeop_desc *a_desc;
7120 * vnode_t a_dvp;
7121 * vnode_t *a_vpp;
7122 * struct componentname *a_cnp;
7123 * struct vnode_attr *a_vap;
7124 * vfs_context_t a_context;
7125 * } */*ap)
7126 {
7127 nfsnode_t np = NULL;
7128 struct nfsmount *nmp;
7129 int error;
7130
7131 nmp = VTONMP(ap->a_dvp);
7132 if (nfs_mount_gone(nmp)) {
7133 return ENXIO;
7134 }
7135
7136 if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) {
7137 return EINVAL;
7138 }
7139 switch (ap->a_vap->va_type) {
7140 case VBLK:
7141 case VCHR:
7142 case VFIFO:
7143 case VSOCK:
7144 break;
7145 default:
7146 return ENOTSUP;
7147 }
7148
7149 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7150 vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np);
7151 if (!error) {
7152 *ap->a_vpp = NFSTOV(np);
7153 }
7154 return error;
7155 }
7156
7157 int
7158 nfs4_vnop_mkdir(
7159 struct vnop_mkdir_args /* {
7160 * struct vnodeop_desc *a_desc;
7161 * vnode_t a_dvp;
7162 * vnode_t *a_vpp;
7163 * struct componentname *a_cnp;
7164 * struct vnode_attr *a_vap;
7165 * vfs_context_t a_context;
7166 * } */*ap)
7167 {
7168 nfsnode_t np = NULL;
7169 int error;
7170
7171 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7172 NFDIR, NULL, &np);
7173 if (!error) {
7174 *ap->a_vpp = NFSTOV(np);
7175 }
7176 return error;
7177 }
7178
7179 int
7180 nfs4_vnop_symlink(
7181 struct vnop_symlink_args /* {
7182 * struct vnodeop_desc *a_desc;
7183 * vnode_t a_dvp;
7184 * vnode_t *a_vpp;
7185 * struct componentname *a_cnp;
7186 * struct vnode_attr *a_vap;
7187 * char *a_target;
7188 * vfs_context_t a_context;
7189 * } */*ap)
7190 {
7191 nfsnode_t np = NULL;
7192 int error;
7193
7194 error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap,
7195 NFLNK, ap->a_target, &np);
7196 if (!error) {
7197 *ap->a_vpp = NFSTOV(np);
7198 }
7199 return error;
7200 }
7201
/*
 * NFSv4 link vnop: create a hard link to a_vp named by a_cnp in directory
 * a_tdvp.  Sends one compound: PUTFH(source), SAVEFH, PUTFH(dir), LINK,
 * GETATTR(dir), RESTOREFH, GETATTR(source).  Refreshes (or invalidates)
 * the cached attributes of both nodes from the reply.
 */
int
nfs4_vnop_link(
	struct vnop_link_args /* {
	                       *  struct vnodeop_desc *a_desc;
	                       *  vnode_t a_vp;
	                       *  vnode_t a_tdvp;
	                       *  struct componentname *a_cnp;
	                       *  vfs_context_t a_context;
	                       *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	int error = 0, lockerror = ENOENT, status;
	struct nfsmount *nmp;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t tdnp = VTONFS(tdvp);
	int nfsvers, numops;
	u_int64_t xid, savedxid;
	struct nfsm_chain nmreq, nmrep;
	struct nfsreq_secinfo_args si;

	/* hard links can't cross mounts */
	if (vnode_mount(vp) != vnode_mount(tdvp)) {
		return EXDEV;
	}

	nmp = VTONMP(vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;
	/* neither endpoint may be a referral trigger */
	if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}
	if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
		return EINVAL;
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR);

	if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH(SOURCE), SAVEFH, PUTFH(DIR), LINK, GETATTR(DIR), RESTOREFH, GETATTR
	numops = 7;
	nfsm_chain_build_alloc_init(error, &nmreq, 29 * NFSX_UNSIGNED + cnp->cn_namelen);
	nfsm_chain_add_compound_header(error, &nmreq, "link", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_LINK);
	nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, tdnp);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
	numops--;
	nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
	nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request(tdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status);

	if ((lockerror = nfs_node_lock2(tdnp, np))) {
		error = lockerror;
		goto nfsmout;
	}
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_LINK);
	nfsm_chain_check_change_info(error, &nmrep, tdnp);
	/* directory attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	savedxid = xid;
	nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(tdnp);
	}
	/* link attributes: if we don't get them, make sure to invalidate */
	nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	xid = savedxid;
	nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid);
	if (error) {
		NATTRINVALIDATE(np);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	if (!lockerror) {
		tdnp->n_flag |= NMODIFIED;
	}
	/* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */
	if (error == EEXIST) {
		error = 0;
	}
	if (!error && (tdnp->n_flag & NNEGNCENTRIES)) {
		/* the directory gained an entry; drop cached negative lookups */
		tdnp->n_flag &= ~NNEGNCENTRIES;
		cache_purge_negatives(tdvp);
	}
	if (!lockerror) {
		nfs_node_unlock2(tdnp, np);
	}
	nfs_node_clear_busy2(tdnp, np);
	return error;
}
7330
/*
 * NFSv4 rmdir vnop: remove the directory a_vp (named by a_cnp) from a_dvp
 * via the REMOVE RPC.  On success the removed node is unhashed so a new
 * object reusing the same filehandle can't be confused with it before the
 * old vnode is reclaimed.
 */
int
nfs4_vnop_rmdir(
	struct vnop_rmdir_args /* {
	                        *  struct vnodeop_desc *a_desc;
	                        *  vnode_t a_dvp;
	                        *  vnode_t a_vp;
	                        *  struct componentname *a_cnp;
	                        *  vfs_context_t a_context;
	                        *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	vnode_t vp = ap->a_vp;
	vnode_t dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsmount *nmp;
	int error = 0, namedattrs;
	nfsnode_t np = VTONFS(vp);
	nfsnode_t dnp = VTONFS(dvp);
	struct nfs_dulookup dul;

	if (vnode_vtype(vp) != VDIR) {
		return EINVAL;
	}

	nmp = NFSTONMP(dnp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR);

	if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) {
		return error;
	}

	if (!namedattrs) {
		/* piggyback a delayed-unit lookup on the remove */
		nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx);
		nfs_dulookup_start(&dul, dnp, ctx);
	}

	error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx));

	/* purge the name regardless of the RPC outcome */
	nfs_name_cache_purge(dnp, np, cnp, ctx);
	/* nfs_getattr() will check changed and purge caches */
	nfs_getattr(dnp, NULL, ctx, NGA_CACHED);
	if (!namedattrs) {
		nfs_dulookup_finish(&dul, dnp, ctx);
	}
	nfs_node_clear_busy2(dnp, np);

	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT) {
		error = 0;
	}
	if (!error) {
		/*
		 * remove nfsnode from hash now so we can't accidentally find it
		 * again if another object gets created with the same filehandle
		 * before this vnode gets reclaimed
		 */
		lck_mtx_lock(nfs_node_hash_mutex);
		if (np->n_hflag & NHHASHED) {
			LIST_REMOVE(np, n_hash);
			np->n_hflag &= ~NHHASHED;
			FSDBG(266, 0, np, np->n_flag, 0xb1eb1e);
		}
		lck_mtx_unlock(nfs_node_hash_mutex);
	}
	return error;
}
7403
7404 /*
7405 * NFSv4 Named Attributes
7406 *
7407 * Both the extended attributes interface and the named streams interface
7408 * are backed by NFSv4 named attributes. The implementations for both use
7409 * a common set of routines in an attempt to reduce code duplication, to
7410 * increase efficiency, to increase caching of both names and data, and to
7411 * confine the complexity.
7412 *
7413 * Each NFS node caches its named attribute directory's file handle.
7414 * The directory nodes for the named attribute directories are handled
7415 * exactly like regular directories (with a couple minor exceptions).
7416 * Named attribute nodes are also treated as much like regular files as
7417 * possible.
7418 *
7419 * Most of the heavy lifting is done by nfs4_named_attr_get().
7420 */
7421
7422 /*
7423 * Get the given node's attribute directory node.
7424 * If !fetch, then only return a cached node.
7425 * Otherwise, we will attempt to fetch the node from the server.
7426 * (Note: the node should be marked busy.)
7427 */
7428 nfsnode_t
7429 nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
7430 {
7431 nfsnode_t adnp = NULL;
7432 struct nfsmount *nmp;
7433 int error = 0, status, numops;
7434 struct nfsm_chain nmreq, nmrep;
7435 u_int64_t xid;
7436 uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
7437 fhandle_t fh;
7438 struct nfs_vattr nvattr;
7439 struct componentname cn;
7440 struct nfsreq rq, *req = &rq;
7441 struct nfsreq_secinfo_args si;
7442
7443 nmp = NFSTONMP(np);
7444 if (nfs_mount_gone(nmp)) {
7445 return NULL;
7446 }
7447 if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
7448 return NULL;
7449 }
7450
7451 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7452 NVATTR_INIT(&nvattr);
7453 nfsm_chain_null(&nmreq);
7454 nfsm_chain_null(&nmrep);
7455
7456 bzero(&cn, sizeof(cn));
7457 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7458 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7459 cn.cn_nameiop = LOOKUP;
7460
7461 if (np->n_attrdirfh) {
7462 // XXX can't set parent correctly (to np) yet
7463 error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
7464 NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
7465 if (adnp) {
7466 goto nfsmout;
7467 }
7468 }
7469 if (!fetch) {
7470 error = ENOENT;
7471 goto nfsmout;
7472 }
7473
7474 // PUTFH, OPENATTR, GETATTR
7475 numops = 3;
7476 nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
7477 nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
7478 numops--;
7479 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7480 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7481 numops--;
7482 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7483 nfsm_chain_add_32(error, &nmreq, 0);
7484 numops--;
7485 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7486 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7487 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7488 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7489 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7490 nfsm_chain_build_done(error, &nmreq);
7491 nfsm_assert(error, (numops == 0), EPROTO);
7492 nfsmout_if(error);
7493 error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
7494 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
7495 if (!error) {
7496 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7497 }
7498
7499 nfsm_chain_skip_tag(error, &nmrep);
7500 nfsm_chain_get_32(error, &nmrep, numops);
7501 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7502 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7503 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7504 nfsmout_if(error);
7505 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7506 nfsmout_if(error);
7507 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
7508 error = ENOENT;
7509 goto nfsmout;
7510 }
7511 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7512 /* (re)allocate attrdir fh buffer */
7513 if (np->n_attrdirfh) {
7514 FREE(np->n_attrdirfh, M_TEMP);
7515 }
7516 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7517 }
7518 if (!np->n_attrdirfh) {
7519 error = ENOMEM;
7520 goto nfsmout;
7521 }
7522 /* cache the attrdir fh in the node */
7523 *np->n_attrdirfh = fh.fh_len;
7524 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7525 /* create node for attrdir */
7526 // XXX can't set parent correctly (to np) yet
7527 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7528 nfsmout:
7529 NVATTR_CLEANUP(&nvattr);
7530 nfsm_chain_cleanup(&nmreq);
7531 nfsm_chain_cleanup(&nmrep);
7532
7533 if (adnp) {
7534 /* sanity check that this node is an attribute directory */
7535 if (adnp->n_vattr.nva_type != VDIR) {
7536 error = EINVAL;
7537 }
7538 if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
7539 error = EINVAL;
7540 }
7541 nfs_node_unlock(adnp);
7542 if (error) {
7543 vnode_put(NFSTOV(adnp));
7544 }
7545 }
7546 return error ? NULL : adnp;
7547 }
7548
7549 /*
7550 * Get the given node's named attribute node for the name given.
7551 *
7552 * In an effort to increase the performance of named attribute access, we try
7553 * to reduce server requests by doing the following:
7554 *
7555 * - cache the node's named attribute directory file handle in the node
7556 * - maintain a directory vnode for the attribute directory
7557 * - use name cache entries (positive and negative) to speed up lookups
7558 * - optionally open the named attribute (with the given accessMode) in the same RPC
7559 * - combine attribute directory retrieval with the lookup/open RPC
7560 * - optionally prefetch the named attribute's first block of data in the same RPC
7561 *
7562 * Also, in an attempt to reduce the number of copies/variations of this code,
7563 * parts of the RPC building/processing code are conditionalized on what is
7564 * needed for any particular request (openattr, lookup vs. open, read).
7565 *
7566 * Note that because we may not have the attribute directory node when we start
7567 * the lookup/open, we lock both the node and the attribute directory node.
7568 */
7569
7570 #define NFS_GET_NAMED_ATTR_CREATE 0x1
7571 #define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
7572 #define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
7573 #define NFS_GET_NAMED_ATTR_PREFETCH 0x8
7574
7575 int
7576 nfs4_named_attr_get(
7577 nfsnode_t np,
7578 struct componentname *cnp,
7579 uint32_t accessMode,
7580 int flags,
7581 vfs_context_t ctx,
7582 nfsnode_t *anpp,
7583 struct nfs_open_file **nofpp)
7584 {
7585 struct nfsmount *nmp;
7586 int error = 0, open_error = EIO;
7587 int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
7588 int create, guarded, prefetch, truncate, noopbusy = 0;
7589 int open, status, numops, hadattrdir, negnamecache;
7590 struct nfs_vattr nvattr;
7591 struct vnode_attr vattr;
7592 nfsnode_t adnp = NULL, anp = NULL;
7593 vnode_t avp = NULL;
7594 u_int64_t xid, savedxid = 0;
7595 struct nfsm_chain nmreq, nmrep;
7596 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
7597 uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
7598 nfs_stateid stateid, dstateid;
7599 fhandle_t fh;
7600 struct nfs_open_owner *noop = NULL;
7601 struct nfs_open_file *newnofp = NULL, *nofp = NULL;
7602 struct vnop_access_args naa;
7603 thread_t thd;
7604 kauth_cred_t cred;
7605 struct timeval now;
7606 char sbuf[64], *s;
7607 uint32_t ace_type, ace_flags, ace_mask, len, slen;
7608 struct kauth_ace ace;
7609 struct nfsreq rq, *req = &rq;
7610 struct nfsreq_secinfo_args si;
7611
7612 *anpp = NULL;
7613 fh.fh_len = 0;
7614 rflags = delegation = recall = eof = rlen = retlen = 0;
7615 ace.ace_flags = 0;
7616 s = sbuf;
7617 slen = sizeof(sbuf);
7618
7619 nmp = NFSTONMP(np);
7620 if (nfs_mount_gone(nmp)) {
7621 return ENXIO;
7622 }
7623 NVATTR_INIT(&nvattr);
7624 negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
7625 thd = vfs_context_thread(ctx);
7626 cred = vfs_context_ucred(ctx);
7627 create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
7628 guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
7629 truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
7630 prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
7631
7632 if (!create) {
7633 error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
7634 if (error) {
7635 return error;
7636 }
7637 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
7638 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
7639 return ENOATTR;
7640 }
7641 } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
7642 /* shouldn't happen... but just be safe */
7643 printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
7644 accessMode = NFS_OPEN_SHARE_ACCESS_READ;
7645 }
7646 open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
7647 if (open) {
7648 /*
7649 * We're trying to open the file.
7650 * We'll create/open it with the given access mode,
7651 * and set NFS_OPEN_FILE_CREATE.
7652 */
7653 denyMode = NFS_OPEN_SHARE_DENY_NONE;
7654 if (prefetch && guarded) {
7655 prefetch = 0; /* no sense prefetching data that can't be there */
7656 }
7657 noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
7658 if (!noop) {
7659 return ENOMEM;
7660 }
7661 }
7662
7663 if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
7664 return error;
7665 }
7666
7667 adnp = nfs4_named_attr_dir_get(np, 0, ctx);
7668 hadattrdir = (adnp != NULL);
7669 if (prefetch) {
7670 microuptime(&now);
7671 /* use the special state ID because we don't have a real one to send */
7672 stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
7673 rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
7674 }
7675 NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
7676 nfsm_chain_null(&nmreq);
7677 nfsm_chain_null(&nmrep);
7678
7679 if (hadattrdir) {
7680 if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
7681 goto nfsmout;
7682 }
7683 /* nfs_getattr() will check changed and purge caches */
7684 error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
7685 nfsmout_if(error);
7686 error = cache_lookup(NFSTOV(adnp), &avp, cnp);
7687 switch (error) {
7688 case ENOENT:
7689 /* negative cache entry */
7690 goto nfsmout;
7691 case 0:
7692 /* cache miss */
7693 /* try dir buf cache lookup */
7694 error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
7695 if (!error && anp) {
7696 /* dir buf cache hit */
7697 *anpp = anp;
7698 error = -1;
7699 }
7700 if (error != -1) { /* cache miss */
7701 break;
7702 }
7703 /* FALLTHROUGH */
7704 case -1:
7705 /* cache hit, not really an error */
7706 OSAddAtomic64(1, &nfsstats.lookupcache_hits);
7707 if (!anp && avp) {
7708 *anpp = anp = VTONFS(avp);
7709 }
7710
7711 nfs_node_clear_busy(adnp);
7712 adbusyerror = ENOENT;
7713
7714 /* check for directory access */
7715 naa.a_desc = &vnop_access_desc;
7716 naa.a_vp = NFSTOV(adnp);
7717 naa.a_action = KAUTH_VNODE_SEARCH;
7718 naa.a_context = ctx;
7719
7720 /* compute actual success/failure based on accessibility */
7721 error = nfs_vnop_access(&naa);
7722 /* FALLTHROUGH */
7723 default:
7724 /* we either found it, or hit an error */
7725 if (!error && guarded) {
7726 /* found cached entry but told not to use it */
7727 error = EEXIST;
7728 vnode_put(NFSTOV(anp));
7729 *anpp = anp = NULL;
7730 }
7731 /* we're done if error or we don't need to open */
7732 if (error || !open) {
7733 goto nfsmout;
7734 }
7735 /* no error and we need to open... */
7736 }
7737 }
7738
7739 if (open) {
7740 restart:
7741 error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
7742 if (error) {
7743 nfs_open_owner_rele(noop);
7744 noop = NULL;
7745 goto nfsmout;
7746 }
7747 inuse = 1;
7748
7749 /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
7750 error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
7751 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
7752 printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
7753 error = EIO;
7754 }
7755 if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
7756 nfs_mount_state_in_use_end(nmp, 0);
7757 error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
7758 nfs_open_file_destroy(newnofp);
7759 newnofp = NULL;
7760 if (!error) {
7761 goto restart;
7762 }
7763 }
7764 if (!error) {
7765 error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
7766 }
7767 if (error) {
7768 if (newnofp) {
7769 nfs_open_file_destroy(newnofp);
7770 }
7771 newnofp = NULL;
7772 goto nfsmout;
7773 }
7774 if (anp) {
7775 /*
7776 * We already have the node. So we just need to open
7777 * it - which we may be able to do with a delegation.
7778 */
7779 open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
7780 if (!error) {
7781 /* open succeeded, so our open file is no longer temporary */
7782 nofp = newnofp;
7783 nofpbusyerror = 0;
7784 newnofp = NULL;
7785 if (nofpp) {
7786 *nofpp = nofp;
7787 }
7788 }
7789 goto nfsmout;
7790 }
7791 }
7792
7793 /*
7794 * We either don't have the attrdir or we didn't find the attribute
7795 * in the name cache, so we need to talk to the server.
7796 *
7797 * If we don't have the attrdir, we'll need to ask the server for that too.
7798 * If the caller is requesting that the attribute be created, we need to
7799 * make sure the attrdir is created.
7800 * The caller may also request that the first block of an existing attribute
7801 * be retrieved at the same time.
7802 */
7803
7804 if (open) {
7805 /* need to mark the open owner busy during the RPC */
7806 if ((error = nfs_open_owner_set_busy(noop, thd))) {
7807 goto nfsmout;
7808 }
7809 noopbusy = 1;
7810 }
7811
7812 /*
7813 * We'd like to get updated post-open/lookup attributes for the
7814 * directory and we may also want to prefetch some data via READ.
7815 * We'd like the READ results to be last so that we can leave the
7816 * data in the mbufs until the end.
7817 *
7818 * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
7819 */
7820 numops = 5;
7821 if (!hadattrdir) {
7822 numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
7823 }
7824 if (prefetch) {
7825 numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
7826 }
7827 nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
7828 nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
7829 if (hadattrdir) {
7830 numops--;
7831 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7832 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7833 } else {
7834 numops--;
7835 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7836 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7837 numops--;
7838 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7839 nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
7840 numops--;
7841 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7842 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7843 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7844 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7845 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7846 }
7847 if (open) {
7848 numops--;
7849 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
7850 nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
7851 nfsm_chain_add_32(error, &nmreq, accessMode);
7852 nfsm_chain_add_32(error, &nmreq, denyMode);
7853 nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
7854 nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
7855 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
7856 nfsm_chain_add_32(error, &nmreq, create);
7857 if (create) {
7858 nfsm_chain_add_32(error, &nmreq, guarded);
7859 VATTR_INIT(&vattr);
7860 if (truncate) {
7861 VATTR_SET(&vattr, va_data_size, 0);
7862 }
7863 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7864 }
7865 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
7866 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7867 } else {
7868 numops--;
7869 nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
7870 nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
7871 }
7872 numops--;
7873 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7874 NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
7875 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
7876 nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
7877 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7878 if (prefetch) {
7879 numops--;
7880 nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
7881 }
7882 if (hadattrdir) {
7883 numops--;
7884 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7885 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
7886 } else {
7887 numops--;
7888 nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
7889 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
7890 numops--;
7891 nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
7892 nfsm_chain_add_32(error, &nmreq, 0);
7893 }
7894 numops--;
7895 nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
7896 nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
7897 NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
7898 if (prefetch) {
7899 numops--;
7900 nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
7901 numops--;
7902 nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
7903 VATTR_INIT(&vattr);
7904 VATTR_SET(&vattr, va_data_size, 0);
7905 nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
7906 numops--;
7907 nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
7908 nfsm_chain_add_stateid(error, &nmreq, &stateid);
7909 nfsm_chain_add_64(error, &nmreq, 0);
7910 nfsm_chain_add_32(error, &nmreq, rlen);
7911 }
7912 nfsm_chain_build_done(error, &nmreq);
7913 nfsm_assert(error, (numops == 0), EPROTO);
7914 nfsmout_if(error);
7915 error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
7916 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
7917 if (!error) {
7918 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
7919 }
7920
7921 if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
7922 error = adlockerror;
7923 }
7924 savedxid = xid;
7925 nfsm_chain_skip_tag(error, &nmrep);
7926 nfsm_chain_get_32(error, &nmrep, numops);
7927 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
7928 if (!hadattrdir) {
7929 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
7930 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
7931 nfsmout_if(error);
7932 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
7933 nfsmout_if(error);
7934 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
7935 if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
7936 /* (re)allocate attrdir fh buffer */
7937 if (np->n_attrdirfh) {
7938 FREE(np->n_attrdirfh, M_TEMP);
7939 }
7940 MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
7941 }
7942 if (np->n_attrdirfh) {
7943 /* remember the attrdir fh in the node */
7944 *np->n_attrdirfh = fh.fh_len;
7945 bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
7946 /* create busied node for attrdir */
7947 struct componentname cn;
7948 bzero(&cn, sizeof(cn));
7949 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
7950 cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
7951 cn.cn_nameiop = LOOKUP;
7952 // XXX can't set parent correctly (to np) yet
7953 error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
7954 if (!error) {
7955 adlockerror = 0;
7956 /* set the node busy */
7957 SET(adnp->n_flag, NBUSY);
7958 adbusyerror = 0;
7959 }
7960 /* if no adnp, oh well... */
7961 error = 0;
7962 }
7963 }
7964 NVATTR_CLEANUP(&nvattr);
7965 fh.fh_len = 0;
7966 }
7967 if (open) {
7968 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
7969 nfs_owner_seqid_increment(noop, NULL, error);
7970 nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
7971 nfsm_chain_check_change_info(error, &nmrep, adnp);
7972 nfsm_chain_get_32(error, &nmrep, rflags);
7973 bmlen = NFS_ATTR_BITMAP_LEN;
7974 nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
7975 nfsm_chain_get_32(error, &nmrep, delegation);
7976 if (!error) {
7977 switch (delegation) {
7978 case NFS_OPEN_DELEGATE_NONE:
7979 break;
7980 case NFS_OPEN_DELEGATE_READ:
7981 case NFS_OPEN_DELEGATE_WRITE:
7982 nfsm_chain_get_stateid(error, &nmrep, &dstateid);
7983 nfsm_chain_get_32(error, &nmrep, recall);
7984 if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
7985 nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
7986 }
7987 /* if we have any trouble accepting the ACE, just invalidate it */
7988 ace_type = ace_flags = ace_mask = len = 0;
7989 nfsm_chain_get_32(error, &nmrep, ace_type);
7990 nfsm_chain_get_32(error, &nmrep, ace_flags);
7991 nfsm_chain_get_32(error, &nmrep, ace_mask);
7992 nfsm_chain_get_32(error, &nmrep, len);
7993 ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
7994 ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
7995 ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
7996 if (!error && (len >= slen)) {
7997 MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
7998 if (s) {
7999 slen = len + 1;
8000 } else {
8001 ace.ace_flags = 0;
8002 }
8003 }
8004 if (s) {
8005 nfsm_chain_get_opaque(error, &nmrep, len, s);
8006 } else {
8007 nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
8008 }
8009 if (!error && s) {
8010 s[len] = '\0';
8011 if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
8012 ace.ace_flags = 0;
8013 }
8014 }
8015 if (error || !s) {
8016 ace.ace_flags = 0;
8017 }
8018 if (s && (s != sbuf)) {
8019 FREE(s, M_TEMP);
8020 }
8021 break;
8022 default:
8023 error = EBADRPC;
8024 break;
8025 }
8026 }
8027 /* At this point if we have no error, the object was created/opened. */
8028 open_error = error;
8029 } else {
8030 nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
8031 }
8032 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8033 nfsmout_if(error);
8034 error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
8035 nfsmout_if(error);
8036 if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
8037 error = EIO;
8038 goto nfsmout;
8039 }
8040 if (prefetch) {
8041 nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
8042 }
8043 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
8044 if (!hadattrdir) {
8045 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
8046 }
8047 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
8048 nfsmout_if(error);
8049 xid = savedxid;
8050 nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
8051 nfsmout_if(error);
8052
8053 if (open) {
8054 if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
8055 newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8056 }
8057 if (rflags & NFS_OPEN_RESULT_CONFIRM) {
8058 if (adnp) {
8059 nfs_node_unlock(adnp);
8060 adlockerror = ENOENT;
8061 }
8062 NVATTR_CLEANUP(&nvattr);
8063 error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
8064 nfsmout_if(error);
8065 savedxid = xid;
8066 if ((adlockerror = nfs_node_lock(adnp))) {
8067 error = adlockerror;
8068 }
8069 }
8070 }
8071
8072 nfsmout:
8073 if (open && adnp && !adlockerror) {
8074 if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
8075 adnp->n_flag &= ~NNEGNCENTRIES;
8076 cache_purge_negatives(NFSTOV(adnp));
8077 }
8078 adnp->n_flag |= NMODIFIED;
8079 nfs_node_unlock(adnp);
8080 adlockerror = ENOENT;
8081 nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
8082 }
8083 if (adnp && !adlockerror && (error == ENOENT) &&
8084 (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
8085 /* add a negative entry in the name cache */
8086 cache_enter(NFSTOV(adnp), NULL, cnp);
8087 adnp->n_flag |= NNEGNCENTRIES;
8088 }
8089 if (adnp && !adlockerror) {
8090 nfs_node_unlock(adnp);
8091 adlockerror = ENOENT;
8092 }
8093 if (!error && !anp && fh.fh_len) {
8094 /* create the vnode with the filehandle and attributes */
8095 xid = savedxid;
8096 error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
8097 if (!error) {
8098 *anpp = anp;
8099 nfs_node_unlock(anp);
8100 }
8101 if (!error && open) {
8102 nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
8103 /* After we have a node, add our open file struct to the node */
8104 nofp = newnofp;
8105 error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
8106 if (error) {
8107 /* This shouldn't happen, because we passed in a new nofp to use. */
8108 printf("nfs_open_file_find_internal failed! %d\n", error);
8109 nofp = NULL;
8110 } else if (nofp != newnofp) {
8111 /*
8112 * Hmm... an open file struct already exists.
8113 * Mark the existing one busy and merge our open into it.
8114 * Then destroy the one we created.
8115 * Note: there's no chance of an open confict because the
8116 * open has already been granted.
8117 */
8118 nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
8119 nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
8120 nofp->nof_stateid = newnofp->nof_stateid;
8121 if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
8122 nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
8123 }
8124 nfs_open_file_clear_busy(newnofp);
8125 nfs_open_file_destroy(newnofp);
8126 newnofp = NULL;
8127 }
8128 if (!error) {
8129 newnofp = NULL;
8130 nofpbusyerror = 0;
8131 /* mark the node as holding a create-initiated open */
8132 nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
8133 nofp->nof_creator = current_thread();
8134 if (nofpp) {
8135 *nofpp = nofp;
8136 }
8137 }
8138 }
8139 }
8140 NVATTR_CLEANUP(&nvattr);
8141 if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
8142 if (!error && anp && !recall) {
8143 /* stuff the delegation state in the node */
8144 lck_mtx_lock(&anp->n_openlock);
8145 anp->n_openflags &= ~N_DELEG_MASK;
8146 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8147 anp->n_dstateid = dstateid;
8148 anp->n_dace = ace;
8149 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8150 lck_mtx_lock(&nmp->nm_lock);
8151 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8152 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8153 }
8154 lck_mtx_unlock(&nmp->nm_lock);
8155 }
8156 lck_mtx_unlock(&anp->n_openlock);
8157 } else {
8158 /* give the delegation back */
8159 if (anp) {
8160 if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
8161 /* update delegation state and return it */
8162 lck_mtx_lock(&anp->n_openlock);
8163 anp->n_openflags &= ~N_DELEG_MASK;
8164 anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
8165 anp->n_dstateid = dstateid;
8166 anp->n_dace = ace;
8167 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8168 lck_mtx_lock(&nmp->nm_lock);
8169 if (anp->n_dlink.tqe_next == NFSNOLIST) {
8170 TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
8171 }
8172 lck_mtx_unlock(&nmp->nm_lock);
8173 }
8174 lck_mtx_unlock(&anp->n_openlock);
8175 /* don't need to send a separate delegreturn for fh */
8176 fh.fh_len = 0;
8177 }
8178 /* return anp's current delegation */
8179 nfs4_delegation_return(anp, 0, thd, cred);
8180 }
8181 if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
8182 nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
8183 }
8184 }
8185 }
8186 if (open) {
8187 if (newnofp) {
8188 /* need to cleanup our temporary nofp */
8189 nfs_open_file_clear_busy(newnofp);
8190 nfs_open_file_destroy(newnofp);
8191 newnofp = NULL;
8192 } else if (nofp && !nofpbusyerror) {
8193 nfs_open_file_clear_busy(nofp);
8194 nofpbusyerror = ENOENT;
8195 }
8196 if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
8197 inuse = 0;
8198 nofp = newnofp = NULL;
8199 rflags = delegation = recall = eof = rlen = retlen = 0;
8200 ace.ace_flags = 0;
8201 s = sbuf;
8202 slen = sizeof(sbuf);
8203 nfsm_chain_cleanup(&nmreq);
8204 nfsm_chain_cleanup(&nmrep);
8205 if (anp) {
8206 vnode_put(NFSTOV(anp));
8207 *anpp = anp = NULL;
8208 }
8209 hadattrdir = (adnp != NULL);
8210 if (noopbusy) {
8211 nfs_open_owner_clear_busy(noop);
8212 noopbusy = 0;
8213 }
8214 goto restart;
8215 }
8216 if (noop) {
8217 if (noopbusy) {
8218 nfs_open_owner_clear_busy(noop);
8219 noopbusy = 0;
8220 }
8221 nfs_open_owner_rele(noop);
8222 }
8223 }
8224 if (!error && prefetch && nmrep.nmc_mhead) {
8225 nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
8226 nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
8227 nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
8228 nfsm_chain_get_32(error, &nmrep, eof);
8229 nfsm_chain_get_32(error, &nmrep, retlen);
8230 if (!error && anp) {
8231 /*
8232 * There can be one problem with doing the prefetch.
8233 * Because we don't have the node before we start the RPC, we
8234 * can't have the buffer busy while the READ is performed.
8235 * So there is a chance that other I/O occured on the same
8236 * range of data while we were performing this RPC. If that
8237 * happens, then it's possible the data we have in the READ
8238 * response is no longer up to date.
8239 * Once we have the node and the buffer, we need to make sure
8240 * that there's no chance we could be putting stale data in
8241 * the buffer.
8242 * So, we check if the range read is dirty or if any I/O may
8243 * have occured on it while we were performing our RPC.
8244 */
8245 struct nfsbuf *bp = NULL;
8246 int lastpg;
8247 uint32_t pagemask;
8248
8249 retlen = MIN(retlen, rlen);
8250
8251 /* check if node needs size update or invalidation */
8252 if (ISSET(anp->n_flag, NUPDATESIZE)) {
8253 nfs_data_update_size(anp, 0);
8254 }
8255 if (!(error = nfs_node_lock(anp))) {
8256 if (anp->n_flag & NNEEDINVALIDATE) {
8257 anp->n_flag &= ~NNEEDINVALIDATE;
8258 nfs_node_unlock(anp);
8259 error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
8260 if (!error) { /* lets play it safe and just drop the data */
8261 error = EIO;
8262 }
8263 } else {
8264 nfs_node_unlock(anp);
8265 }
8266 }
8267
8268 /* calculate page mask for the range of data read */
8269 lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
8270 pagemask = ((1 << (lastpg + 1)) - 1);
8271
8272 if (!error) {
8273 error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
8274 }
8275 /* don't save the data if dirty or potential I/O conflict */
8276 if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
8277 timevalcmp(&anp->n_lastio, &now, <)) {
8278 OSAddAtomic64(1, &nfsstats.read_bios);
8279 CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
8280 SET(bp->nb_flags, NB_READ);
8281 NFS_BUF_MAP(bp);
8282 nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
8283 if (error) {
8284 bp->nb_error = error;
8285 SET(bp->nb_flags, NB_ERROR);
8286 } else {
8287 bp->nb_offio = 0;
8288 bp->nb_endio = rlen;
8289 if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
8290 bp->nb_endio = retlen;
8291 }
8292 if (eof || (retlen == 0)) {
8293 /* zero out the remaining data (up to EOF) */
8294 off_t rpcrem, eofrem, rem;
8295 rpcrem = (rlen - retlen);
8296 eofrem = anp->n_size - (NBOFF(bp) + retlen);
8297 rem = (rpcrem < eofrem) ? rpcrem : eofrem;
8298 if (rem > 0) {
8299 bzero(bp->nb_data + retlen, rem);
8300 }
8301 } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
8302 /* ugh... short read ... just invalidate for now... */
8303 SET(bp->nb_flags, NB_INVAL);
8304 }
8305 }
8306 nfs_buf_read_finish(bp);
8307 microuptime(&anp->n_lastio);
8308 }
8309 if (bp) {
8310 nfs_buf_release(bp, 1);
8311 }
8312 }
8313 error = 0; /* ignore any transient error in processing the prefetch */
8314 }
8315 if (adnp && !adbusyerror) {
8316 nfs_node_clear_busy(adnp);
8317 adbusyerror = ENOENT;
8318 }
8319 if (!busyerror) {
8320 nfs_node_clear_busy(np);
8321 busyerror = ENOENT;
8322 }
8323 if (adnp) {
8324 vnode_put(NFSTOV(adnp));
8325 }
8326 if (error && *anpp) {
8327 vnode_put(NFSTOV(*anpp));
8328 *anpp = NULL;
8329 }
8330 nfsm_chain_cleanup(&nmreq);
8331 nfsm_chain_cleanup(&nmrep);
8332 return error;
8333 }
8334
8335 /*
8336 * Remove a named attribute.
8337 */
8338 int
8339 nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
8340 {
8341 nfsnode_t adnp = NULL;
8342 struct nfsmount *nmp;
8343 struct componentname cn;
8344 struct vnop_remove_args vra;
8345 int error, putanp = 0;
8346
8347 nmp = NFSTONMP(np);
8348 if (nfs_mount_gone(nmp)) {
8349 return ENXIO;
8350 }
8351
8352 bzero(&cn, sizeof(cn));
8353 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
8354 cn.cn_namelen = strlen(name);
8355 cn.cn_nameiop = DELETE;
8356 cn.cn_flags = 0;
8357
8358 if (!anp) {
8359 error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8360 0, ctx, &anp, NULL);
8361 if ((!error && !anp) || (error == ENOATTR)) {
8362 error = ENOENT;
8363 }
8364 if (error) {
8365 if (anp) {
8366 vnode_put(NFSTOV(anp));
8367 anp = NULL;
8368 }
8369 goto out;
8370 }
8371 putanp = 1;
8372 }
8373
8374 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
8375 goto out;
8376 }
8377 adnp = nfs4_named_attr_dir_get(np, 1, ctx);
8378 nfs_node_clear_busy(np);
8379 if (!adnp) {
8380 error = ENOENT;
8381 goto out;
8382 }
8383
8384 vra.a_desc = &vnop_remove_desc;
8385 vra.a_dvp = NFSTOV(adnp);
8386 vra.a_vp = NFSTOV(anp);
8387 vra.a_cnp = &cn;
8388 vra.a_flags = 0;
8389 vra.a_context = ctx;
8390 error = nfs_vnop_remove(&vra);
8391 out:
8392 if (adnp) {
8393 vnode_put(NFSTOV(adnp));
8394 }
8395 if (putanp) {
8396 vnode_put(NFSTOV(anp));
8397 }
8398 return error;
8399 }
8400
8401 int
8402 nfs4_vnop_getxattr(
8403 struct vnop_getxattr_args /* {
8404 * struct vnodeop_desc *a_desc;
8405 * vnode_t a_vp;
8406 * const char * a_name;
8407 * uio_t a_uio;
8408 * size_t *a_size;
8409 * int a_options;
8410 * vfs_context_t a_context;
8411 * } */*ap)
8412 {
8413 vfs_context_t ctx = ap->a_context;
8414 struct nfsmount *nmp;
8415 struct nfs_vattr nvattr;
8416 struct componentname cn;
8417 nfsnode_t anp;
8418 int error = 0, isrsrcfork;
8419
8420 nmp = VTONMP(ap->a_vp);
8421 if (nfs_mount_gone(nmp)) {
8422 return ENXIO;
8423 }
8424
8425 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8426 return ENOTSUP;
8427 }
8428 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8429 if (error) {
8430 return error;
8431 }
8432 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8433 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8434 return ENOATTR;
8435 }
8436
8437 bzero(&cn, sizeof(cn));
8438 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8439 cn.cn_namelen = strlen(ap->a_name);
8440 cn.cn_nameiop = LOOKUP;
8441 cn.cn_flags = MAKEENTRY;
8442
8443 /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
8444 isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
8445
8446 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8447 !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
8448 if ((!error && !anp) || (error == ENOENT)) {
8449 error = ENOATTR;
8450 }
8451 if (!error) {
8452 if (ap->a_uio) {
8453 error = nfs_bioread(anp, ap->a_uio, 0, ctx);
8454 } else {
8455 *ap->a_size = anp->n_size;
8456 }
8457 }
8458 if (anp) {
8459 vnode_put(NFSTOV(anp));
8460 }
8461 return error;
8462 }
8463
/*
 * Set an extended attribute via NFSv4 named attributes.
 *
 * Special cases:
 *  - FinderInfo is fixed-size (32 bytes); an all-zero FinderInfo means
 *    "remove the attribute" rather than "store 32 zero bytes".
 *  - The resource fork is treated as a stream: writes go at the caller's
 *    offset and the attribute is NOT truncated first; every other xattr
 *    is rewritten from offset 0 with truncation.
 *
 * Returns 0 on success; ENOENT from the underlying operations is mapped
 * to ENOATTR for the xattr API.
 */
int
nfs4_vnop_setxattr(
	struct vnop_setxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  const char * a_name;
	 *  uio_t a_uio;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	int options = ap->a_options;
	uio_t uio = ap->a_uio;
	const char *name = ap->a_name;
	struct nfsmount *nmp;
	struct componentname cn;
	nfsnode_t anp = NULL;
	int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
#define FINDERINFOSIZE 32
	uint8_t finfo[FINDERINFOSIZE];
	uint32_t *finfop;
	struct nfs_open_file *nofp = NULL;
	char uio_buf[UIO_SIZEOF(1)];
	uio_t auio;
	struct vnop_write_args vwa;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* XATTR_CREATE and XATTR_REPLACE are mutually exclusive */
	if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
		return EINVAL;
	}

	/* XXX limitation based on need to back up uio on short write */
	if (uio_iovcnt(uio) > 1) {
		printf("nfs4_vnop_setxattr: iovcnt > 1\n");
		return EINVAL;
	}

	/* set up a CREATE-style componentname for the attribute's name */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = MAKEENTRY;

	isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
	isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
	if (!isrsrcfork) {
		/* non-resource-fork xattrs are always rewritten from the start */
		uio_setoffset(uio, 0);
	}
	if (isfinderinfo) {
		/* FinderInfo must be exactly 32 bytes */
		if (uio_resid(uio) != sizeof(finfo)) {
			return ERANGE;
		}
		error = uiomove((char*)&finfo, sizeof(finfo), uio);
		if (error) {
			return error;
		}
		/* setting a FinderInfo of all zeroes means remove the FinderInfo */
		empty = 1;
		for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
			if (finfop[i]) {
				empty = 0;
				break;
			}
		}
		if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
			/* plain "set empty FinderInfo" (no create/replace semantics): just remove it */
			error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
			if (error == ENOENT) {
				error = 0;
			}
			return error;
		}
		/* first, let's see if we get a create/replace error */
	}

	/*
	 * create/open the xattr
	 *
	 * We need to make sure not to create it if XATTR_REPLACE.
	 * For all xattrs except the resource fork, we also want to
	 * truncate the xattr to remove any current data.  We'll do
	 * that by setting the size to 0 on create/open.
	 */
	flags = 0;
	if (!(options & XATTR_REPLACE)) {
		flags |= NFS_GET_NAMED_ATTR_CREATE;
	}
	if (options & XATTR_CREATE) {
		flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
	}
	if (!isrsrcfork) {
		flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
	}

	error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
	    flags, ctx, &anp, &nofp);
	if (!error && !anp) {
		error = ENOATTR;
	}
	if (error) {
		goto out;
	}
	/* grab the open state from the get/create/open */
	if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
		/* clear create-initiated state since this open now belongs to us */
		nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
		nofp->nof_creator = NULL;
		nfs_open_file_clear_busy(nofp);
	}

	/* Setting an empty FinderInfo really means remove it, skip to the close/remove */
	if (isfinderinfo && empty) {
		goto doclose;
	}

	/*
	 * Write the data out and flush.
	 *
	 * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
	 */
	vwa.a_desc = &vnop_write_desc;
	vwa.a_vp = NFSTOV(anp);
	vwa.a_uio = NULL;
	vwa.a_ioflag = 0;
	vwa.a_context = ctx;
	if (isfinderinfo) {
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
		uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
		vwa.a_uio = auio;
	} else if (uio_resid(uio) > 0) {
		vwa.a_uio = uio;
	}
	if (vwa.a_uio) {
		error = nfs_vnop_write(&vwa);
		if (!error) {
			/* make sure the written data actually hits the server */
			error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
		}
	}
doclose:
	/* Close the xattr. */
	if (nofp) {
		int busyerror = nfs_open_file_set_busy(nofp, NULL);
		closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
		if (!busyerror) {
			nfs_open_file_clear_busy(nofp);
		}
	}
	if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
		error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
		if (error == ENOENT) {
			error = 0;
		}
	}
	if (!error) {
		/* propagate any close failure if everything else succeeded */
		error = closeerror;
	}
out:
	if (anp) {
		vnode_put(NFSTOV(anp));
	}
	/* map ENOENT to ENOATTR for the xattr API */
	if (error == ENOENT) {
		error = ENOATTR;
	}
	return error;
}
8636
8637 int
8638 nfs4_vnop_removexattr(
8639 struct vnop_removexattr_args /* {
8640 * struct vnodeop_desc *a_desc;
8641 * vnode_t a_vp;
8642 * const char * a_name;
8643 * int a_options;
8644 * vfs_context_t a_context;
8645 * } */*ap)
8646 {
8647 struct nfsmount *nmp = VTONMP(ap->a_vp);
8648 int error;
8649
8650 if (nfs_mount_gone(nmp)) {
8651 return ENXIO;
8652 }
8653 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8654 return ENOTSUP;
8655 }
8656
8657 error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
8658 if (error == ENOENT) {
8659 error = ENOATTR;
8660 }
8661 return error;
8662 }
8663
/*
 * List the extended attributes of a file by reading the entries of its
 * NFSv4 named attribute directory.
 *
 * If a_uio is NULL, just total up the space needed into *a_size;
 * otherwise copy each (NUL-terminated) name into the uio.
 * Returns 0 with nothing listed if the node has no attribute directory.
 */
int
nfs4_vnop_listxattr(
	struct vnop_listxattr_args /* {
	 *  struct vnodeop_desc *a_desc;
	 *  vnode_t a_vp;
	 *  uio_t a_uio;
	 *  size_t *a_size;
	 *  int a_options;
	 *  vfs_context_t a_context;
	 *  } */*ap)
{
	vfs_context_t ctx = ap->a_context;
	nfsnode_t np = VTONFS(ap->a_vp);
	uio_t uio = ap->a_uio;
	nfsnode_t adnp = NULL;
	struct nfsmount *nmp;
	int error, done, i;
	struct nfs_vattr nvattr;
	uint64_t cookie, nextcookie, lbn = 0;
	struct nfsbuf *bp = NULL;
	struct nfs_dir_buf_header *ndbhp;
	struct direntry *dp;

	nmp = VTONMP(ap->a_vp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
		return ENOTSUP;
	}

	/* quick out: if the server says there are no named attrs, report an empty list */
	error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
	if (error) {
		return error;
	}
	if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
	    !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
		return 0;
	}

	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		return error;
	}
	adnp = nfs4_named_attr_dir_get(np, 1, ctx);
	nfs_node_clear_busy(np);
	if (!adnp) {
		/* no attribute directory: nothing to list (error is still 0 here) */
		goto out;
	}

	if ((error = nfs_node_lock(adnp))) {
		goto out;
	}

	/* honor a pending invalidate request on the attribute directory */
	if (adnp->n_flag & NNEEDINVALIDATE) {
		adnp->n_flag &= ~NNEEDINVALIDATE;
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
		if (!error) {
			error = nfs_node_lock(adnp);
		}
		if (error) {
			goto out;
		}
	}

	/*
	 * check for need to invalidate when (re)starting at beginning
	 */
	if (adnp->n_flag & NMODIFIED) {
		nfs_invaldir(adnp);
		nfs_node_unlock(adnp);
		if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
			goto out;
		}
	} else {
		nfs_node_unlock(adnp);
	}
	/* nfs_getattr() will check changed and purge caches */
	if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
		goto out;
	}

	/* caller supplied a buffer with no room: nothing to do */
	if (uio && (uio_resid(uio) == 0)) {
		goto out;
	}

	done = 0;
	nextcookie = lbn = 0;

	/* walk the cached directory buffers, block by block, until EOF */
	while (!error && !done) {
		OSAddAtomic64(1, &nfsstats.biocache_readdirs);
		cookie = nextcookie;
getbuffer:
		error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
		if (error) {
			goto out;
		}
		ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
		if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
			if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
				ndbhp->ndbh_flags = 0;
				ndbhp->ndbh_count = 0;
				ndbhp->ndbh_entry_end = sizeof(*ndbhp);
				ndbhp->ndbh_ncgen = adnp->n_ncgen;
			}
			error = nfs_buf_readdir(bp, ctx);
			if (error == NFSERR_DIRBUFDROPPED) {
				/* buffer was dropped out from under us; retry from scratch */
				goto getbuffer;
			}
			if (error) {
				nfs_buf_release(bp, 1);
			}
			if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
				/* a hard readdir failure invalidates our view of the directory */
				if (!nfs_node_lock(adnp)) {
					nfs_invaldir(adnp);
					nfs_node_unlock(adnp);
				}
				nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
				if (error == NFSERR_BAD_COOKIE) {
					error = ENOENT;
				}
			}
			if (error) {
				goto out;
			}
		}

		/* go through all the entries copying/counting */
		dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
		for (i = 0; i < ndbhp->ndbh_count; i++) {
			/* skip names the xattr API reserves (e.g. system-protected names) */
			if (!xattr_protected(dp->d_name)) {
				if (uio == NULL) {
					/* size-only query: count name plus its NUL terminator */
					*ap->a_size += dp->d_namlen + 1;
				} else if (uio_resid(uio) < (dp->d_namlen + 1)) {
					error = ERANGE;
				} else {
					error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
					if (error && (error != EFAULT)) {
						error = ERANGE;
					}
				}
			}
			nextcookie = dp->d_seekoff;
			dp = NFS_DIRENTRY_NEXT(dp);
		}

		if (i == ndbhp->ndbh_count) {
			/* hit end of buffer, move to next buffer */
			lbn = nextcookie;
			/* if we also hit EOF, we're done */
			if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
				done = 1;
			}
		}
		if (!error && !done && (nextcookie == cookie)) {
			/* cookie didn't advance: we'd loop forever, so fail instead */
			printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
			error = EIO;
		}
		nfs_buf_release(bp, 1);
	}
out:
	if (adnp) {
		vnode_put(NFSTOV(adnp));
	}
	return error;
}
8832
8833 #if NAMEDSTREAMS
8834 int
8835 nfs4_vnop_getnamedstream(
8836 struct vnop_getnamedstream_args /* {
8837 * struct vnodeop_desc *a_desc;
8838 * vnode_t a_vp;
8839 * vnode_t *a_svpp;
8840 * const char *a_name;
8841 * enum nsoperation a_operation;
8842 * int a_flags;
8843 * vfs_context_t a_context;
8844 * } */*ap)
8845 {
8846 vfs_context_t ctx = ap->a_context;
8847 struct nfsmount *nmp;
8848 struct nfs_vattr nvattr;
8849 struct componentname cn;
8850 nfsnode_t anp;
8851 int error = 0;
8852
8853 nmp = VTONMP(ap->a_vp);
8854 if (nfs_mount_gone(nmp)) {
8855 return ENXIO;
8856 }
8857
8858 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8859 return ENOTSUP;
8860 }
8861 error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
8862 if (error) {
8863 return error;
8864 }
8865 if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
8866 !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
8867 return ENOATTR;
8868 }
8869
8870 bzero(&cn, sizeof(cn));
8871 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8872 cn.cn_namelen = strlen(ap->a_name);
8873 cn.cn_nameiop = LOOKUP;
8874 cn.cn_flags = MAKEENTRY;
8875
8876 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
8877 0, ctx, &anp, NULL);
8878 if ((!error && !anp) || (error == ENOENT)) {
8879 error = ENOATTR;
8880 }
8881 if (!error && anp) {
8882 *ap->a_svpp = NFSTOV(anp);
8883 } else if (anp) {
8884 vnode_put(NFSTOV(anp));
8885 }
8886 return error;
8887 }
8888
8889 int
8890 nfs4_vnop_makenamedstream(
8891 struct vnop_makenamedstream_args /* {
8892 * struct vnodeop_desc *a_desc;
8893 * vnode_t *a_svpp;
8894 * vnode_t a_vp;
8895 * const char *a_name;
8896 * int a_flags;
8897 * vfs_context_t a_context;
8898 * } */*ap)
8899 {
8900 vfs_context_t ctx = ap->a_context;
8901 struct nfsmount *nmp;
8902 struct componentname cn;
8903 nfsnode_t anp;
8904 int error = 0;
8905
8906 nmp = VTONMP(ap->a_vp);
8907 if (nfs_mount_gone(nmp)) {
8908 return ENXIO;
8909 }
8910
8911 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8912 return ENOTSUP;
8913 }
8914
8915 bzero(&cn, sizeof(cn));
8916 cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
8917 cn.cn_namelen = strlen(ap->a_name);
8918 cn.cn_nameiop = CREATE;
8919 cn.cn_flags = MAKEENTRY;
8920
8921 error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
8922 NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
8923 if ((!error && !anp) || (error == ENOENT)) {
8924 error = ENOATTR;
8925 }
8926 if (!error && anp) {
8927 *ap->a_svpp = NFSTOV(anp);
8928 } else if (anp) {
8929 vnode_put(NFSTOV(anp));
8930 }
8931 return error;
8932 }
8933
8934 int
8935 nfs4_vnop_removenamedstream(
8936 struct vnop_removenamedstream_args /* {
8937 * struct vnodeop_desc *a_desc;
8938 * vnode_t a_vp;
8939 * vnode_t a_svp;
8940 * const char *a_name;
8941 * int a_flags;
8942 * vfs_context_t a_context;
8943 * } */*ap)
8944 {
8945 struct nfsmount *nmp = VTONMP(ap->a_vp);
8946 nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
8947 nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
8948
8949 if (nfs_mount_gone(nmp)) {
8950 return ENXIO;
8951 }
8952
8953 /*
8954 * Given that a_svp is a named stream, checking for
8955 * named attribute support is kinda pointless.
8956 */
8957 if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
8958 return ENOTSUP;
8959 }
8960
8961 return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);
8962 }
8963
8964 #endif
8965 #endif /* CONFIG_NFS4 */
8966
8967 #endif /* CONFIG_NFS_CLIENT */