[apple/xnu.git] / bsd / nfs / nfs_subs.c (xnu-7195.101.1)
1/*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_subs.c 8.8 (Berkeley) 5/22/95
65 * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $
66 */
67
68#include <nfs/nfs_conf.h>
69#if CONFIG_NFS
70
71/*
72 * These functions support the macros and help fiddle mbuf chains for
73 * the nfs op functions. They do things like create the rpc header and
74 * copy data between mbuf chains and uio lists.
75 */
76#include <sys/param.h>
77#include <sys/proc.h>
78#include <sys/kauth.h>
79#include <sys/systm.h>
80#include <sys/kernel.h>
81#include <sys/mount_internal.h>
82#include <sys/vnode_internal.h>
83#include <sys/kpi_mbuf.h>
84#include <sys/socket.h>
85#include <sys/un.h>
86#include <sys/stat.h>
87#include <sys/malloc.h>
88#include <sys/syscall.h>
89#include <sys/ubc_internal.h>
90#include <sys/fcntl.h>
91#include <sys/uio.h>
92#include <sys/domain.h>
93#include <libkern/OSAtomic.h>
94#include <kern/thread_call.h>
95#include <kern/task.h>
96
97#include <sys/vm.h>
98#include <sys/vmparam.h>
99
100#include <sys/time.h>
101#include <kern/clock.h>
102
103#include <nfs/rpcv2.h>
104#include <nfs/nfsproto.h>
105#include <nfs/nfs.h>
106#include <nfs/nfsnode.h>
107#if CONFIG_NFS_CLIENT
108#define _NFS_XDR_SUBS_FUNCS_ /* define this to get xdrbuf function definitions */
109#endif
110#include <nfs/xdr_subs.h>
111#include <nfs/nfsm_subs.h>
112#include <nfs/nfs_gss.h>
113#include <nfs/nfsmount.h>
114#include <nfs/nfs_lock.h>
115
116#include <miscfs/specfs/specdev.h>
117
118#include <netinet/in.h>
119#include <net/kpi_interface.h>
120
121#include <sys/utfconv.h>
122
123/*
124 * NFS globals
125 */
126struct nfsstats __attribute__((aligned(8))) nfsstats;
127size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0;
128
129/*
130 * functions to convert between NFS and VFS types
131 */
132nfstype
133vtonfs_type(enum vtype vtype, int nfsvers)
134{
135 switch (vtype) {
136 case VNON:
137 return NFNON;
138 case VREG:
139 return NFREG;
140 case VDIR:
141 return NFDIR;
142 case VBLK:
143 return NFBLK;
144 case VCHR:
145 return NFCHR;
146 case VLNK:
147 return NFLNK;
148 case VSOCK:
149 if (nfsvers > NFS_VER2) {
150 return NFSOCK;
151 }
152 return NFNON;
153 case VFIFO:
154 if (nfsvers > NFS_VER2) {
155 return NFFIFO;
156 }
157 return NFNON;
158 case VBAD:
159 case VSTR:
160 case VCPLX:
161 default:
162 return NFNON;
163 }
164}
165
166enum vtype
167nfstov_type(nfstype nvtype, int nfsvers)
168{
169 switch (nvtype) {
170 case NFNON:
171 return VNON;
172 case NFREG:
173 return VREG;
174 case NFDIR:
175 return VDIR;
176 case NFBLK:
177 return VBLK;
178 case NFCHR:
179 return VCHR;
180 case NFLNK:
181 return VLNK;
182 case NFSOCK:
183 if (nfsvers > NFS_VER2) {
184 return VSOCK;
185 }
186 OS_FALLTHROUGH;
187 case NFFIFO:
188 if (nfsvers > NFS_VER2) {
189 return VFIFO;
190 }
191 OS_FALLTHROUGH;
192 case NFATTRDIR:
193 if (nfsvers > NFS_VER3) {
194 return VDIR;
195 }
196 OS_FALLTHROUGH;
197 case NFNAMEDATTR:
198 if (nfsvers > NFS_VER3) {
199 return VREG;
200 }
201 OS_FALLTHROUGH;
202 default:
203 return VNON;
204 }
205}
206
207int
208vtonfsv2_mode(enum vtype vtype, mode_t m)
209{
210 switch (vtype) {
211 case VNON:
212 case VREG:
213 case VDIR:
214 case VBLK:
215 case VCHR:
216 case VLNK:
217 case VSOCK:
218 return vnode_makeimode(vtype, m);
219 case VFIFO:
220 return vnode_makeimode(VCHR, m);
221 case VBAD:
222 case VSTR:
223 case VCPLX:
224 default:
225 return vnode_makeimode(VNON, m);
226 }
227}
228
229#if CONFIG_NFS_SERVER
230
231/*
232 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
233 */
234int nfsv3_procid[NFS_NPROCS] = {
235 NFSPROC_NULL,
236 NFSPROC_GETATTR,
237 NFSPROC_SETATTR,
238 NFSPROC_NOOP,
239 NFSPROC_LOOKUP,
240 NFSPROC_READLINK,
241 NFSPROC_READ,
242 NFSPROC_NOOP,
243 NFSPROC_WRITE,
244 NFSPROC_CREATE,
245 NFSPROC_REMOVE,
246 NFSPROC_RENAME,
247 NFSPROC_LINK,
248 NFSPROC_SYMLINK,
249 NFSPROC_MKDIR,
250 NFSPROC_RMDIR,
251 NFSPROC_READDIR,
252 NFSPROC_FSSTAT,
253 NFSPROC_NOOP,
254 NFSPROC_NOOP,
255 NFSPROC_NOOP,
256 NFSPROC_NOOP,
257 NFSPROC_NOOP
258};
259
260#endif /* CONFIG_NFS_SERVER */
261
262/*
263 * and the reverse mapping from generic to Version 2 procedure numbers
264 */
265int nfsv2_procid[NFS_NPROCS] = {
266 NFSV2PROC_NULL,
267 NFSV2PROC_GETATTR,
268 NFSV2PROC_SETATTR,
269 NFSV2PROC_LOOKUP,
270 NFSV2PROC_NOOP,
271 NFSV2PROC_READLINK,
272 NFSV2PROC_READ,
273 NFSV2PROC_WRITE,
274 NFSV2PROC_CREATE,
275 NFSV2PROC_MKDIR,
276 NFSV2PROC_SYMLINK,
277 NFSV2PROC_CREATE,
278 NFSV2PROC_REMOVE,
279 NFSV2PROC_RMDIR,
280 NFSV2PROC_RENAME,
281 NFSV2PROC_LINK,
282 NFSV2PROC_READDIR,
283 NFSV2PROC_NOOP,
284 NFSV2PROC_STATFS,
285 NFSV2PROC_NOOP,
286 NFSV2PROC_NOOP,
287 NFSV2PROC_NOOP,
288 NFSV2PROC_NOOP
289};
290
291
292/*
293 * initialize NFS's cache of mbuf constants
294 */
295void
296nfs_mbuf_init(void)
297{
298 struct mbuf_stat ms;
299
300 mbuf_stats(&ms);
301 nfs_mbuf_mhlen = ms.mhlen;
302 nfs_mbuf_minclsize = ms.minclsize;
303}
304
305#if CONFIG_NFS_SERVER
306
307/*
308 * allocate a list of mbufs to hold the given amount of data
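 *
 * Each element is a cluster mbuf sized for the remaining data; the chain's
 * mbuf lengths sum to "size" and *mbcnt reports how many mbufs were
 * allocated (e.g. a 9000-byte read reply typically spans several clusters).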
309 */
310int
311nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt)
312{
313 int error, cnt;
314 mbuf_t mhead, mlast, m;
315 size_t len, mlen;
316
317 error = cnt = 0;
318 mhead = mlast = NULL;
319 len = 0;
320
321 while (len < size) {
322 nfsm_mbuf_getcluster(error, &m, (size - len));
323 if (error) {
324 break;
325 }
326 if (!mhead) {
327 mhead = m;
328 }
329 if (mlast && ((error = mbuf_setnext(mlast, m)))) {
330 mbuf_free(m);
331 break;
332 }
333 mlen = mbuf_maxlen(m);
334 if ((len + mlen) > size) {
335 mlen = size - len;
336 }
337 mbuf_setlen(m, mlen);
338 len += mlen;
339 cnt++;
340 mlast = m;
341 }
342
343 if (!error) {
344 *mp = mhead;
345 *mbcnt = cnt;
346 }
347 return error;
348}
349
350#endif /* CONFIG_NFS_SERVER */
351
352/*
353 * nfsm_chain_new_mbuf()
354 *
355 * Add a new mbuf to the given chain.
356 */
357int
358nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint)
359{
360 mbuf_t mb;
361 int error = 0;
362
363 if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) {
364 sizehint = nfs_mbuf_minclsize;
365 }
366
367 /* allocate a new mbuf */
368 nfsm_mbuf_getcluster(error, &mb, sizehint);
369 if (error) {
370 return error;
371 }
372 if (mb == NULL) {
373 panic("got NULL mbuf?");
374 }
375
376 /* do we have a current mbuf? */
377 if (nmc->nmc_mcur) {
378 /* first cap off current mbuf */
379 mbuf_setlen(nmc->nmc_mcur, nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur));
380 /* then append the new mbuf */
381 error = mbuf_setnext(nmc->nmc_mcur, mb);
382 if (error) {
383 mbuf_free(mb);
384 return error;
385 }
386 }
387
388 /* set up for using the new mbuf */
389 nmc->nmc_mcur = mb;
390 nmc->nmc_ptr = mbuf_data(mb);
391 nmc->nmc_left = mbuf_trailingspace(mb);
392
393 return 0;
394}
395
396/*
397 * nfsm_chain_add_opaque_f()
398 *
399 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
400 */
401int
402nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, size_t len)
403{
404 size_t paddedlen, tlen;
405 int error;
406
407 paddedlen = nfsm_rndup(len);
408
409 while (paddedlen) {
410 if (!nmc->nmc_left) {
411 error = nfsm_chain_new_mbuf(nmc, paddedlen);
412 if (error) {
413 return error;
414 }
415 }
416 tlen = MIN(nmc->nmc_left, paddedlen);
417 if (tlen) {
418 if (len) {
419 if (tlen > len) {
420 tlen = len;
421 }
422 bcopy(buf, nmc->nmc_ptr, tlen);
423 } else {
424 bzero(nmc->nmc_ptr, tlen);
425 }
426 nmc->nmc_ptr += tlen;
427 nmc->nmc_left -= tlen;
428 paddedlen -= tlen;
429 if (len) {
430 buf += tlen;
431 len -= tlen;
432 }
433 }
434 }
435 return 0;
436}
437
438/*
439 * nfsm_chain_add_opaque_nopad_f()
440 *
441 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
442 * Do not XDR pad.
443 */
444int
445nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, size_t len)
446{
447 size_t tlen;
448 int error;
449
450 while (len > 0) {
451 if (nmc->nmc_left <= 0) {
452 error = nfsm_chain_new_mbuf(nmc, len);
453 if (error) {
454 return error;
455 }
456 }
457 tlen = MIN(nmc->nmc_left, len);
458 bcopy(buf, nmc->nmc_ptr, tlen);
459 nmc->nmc_ptr += tlen;
460 nmc->nmc_left -= tlen;
461 len -= tlen;
462 buf += tlen;
463 }
464 return 0;
465}
466
467/*
468 * nfsm_chain_add_uio()
469 *
470 * Add "len" bytes of data from "uio" to the given chain.
471 */
472int
473nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, size_t len)
474{
475 size_t paddedlen, tlen;
476 int error;
477
478 paddedlen = nfsm_rndup(len);
479
480 while (paddedlen) {
481 if (!nmc->nmc_left) {
482 error = nfsm_chain_new_mbuf(nmc, paddedlen);
483 if (error) {
484 return error;
485 }
486 }
487 tlen = MIN(nmc->nmc_left, paddedlen);
488 if (tlen) {
489 if (len) {
490 tlen = MIN(INT32_MAX, MIN(tlen, len));
491 uiomove(nmc->nmc_ptr, (int)tlen, uio);
492 } else {
493 bzero(nmc->nmc_ptr, tlen);
494 }
495 nmc->nmc_ptr += tlen;
496 nmc->nmc_left -= tlen;
497 paddedlen -= tlen;
498 if (len) {
499 len -= tlen;
500 }
501 }
502 }
503 return 0;
504}
505
506/*
507 * Find the length of the NFS mbuf chain
508 * up to the current encoding/decoding offset.
509 */
510size_t
511nfsm_chain_offset(struct nfsm_chain *nmc)
512{
513 mbuf_t mb;
514 size_t len = 0;
515
516 for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) {
517 if (mb == nmc->nmc_mcur) {
518 return len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb));
519 }
520 len += mbuf_len(mb);
521 }
522
523 return len;
524}
525
526/*
527 * nfsm_chain_advance()
528 *
529 * Advance an nfsm_chain by "len" bytes.
530 */
531int
532nfsm_chain_advance(struct nfsm_chain *nmc, size_t len)
533{
534 mbuf_t mb;
535
536 while (len) {
537 if (nmc->nmc_left >= len) {
538 nmc->nmc_left -= len;
539 nmc->nmc_ptr += len;
540 return 0;
541 }
542 len -= nmc->nmc_left;
543 nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur);
544 if (!mb) {
545 return EBADRPC;
546 }
547 nmc->nmc_ptr = mbuf_data(mb);
548 nmc->nmc_left = mbuf_len(mb);
549 }
550
551 return 0;
552}
553
554/*
555 * nfsm_chain_reverse()
556 *
557 * Reverse decode offset in an nfsm_chain by "len" bytes.
558 */
559int
560nfsm_chain_reverse(struct nfsm_chain *nmc, size_t len)
561{
562 size_t mlen, new_offset;
563 int error = 0;
564
565 mlen = nmc->nmc_ptr - (caddr_t) mbuf_data(nmc->nmc_mcur);
566 if (len <= mlen) {
567 nmc->nmc_ptr -= len;
568 nmc->nmc_left += len;
569 return 0;
570 }
571
572 new_offset = nfsm_chain_offset(nmc) - len;
573 nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead);
574 if (error) {
575 return error;
576 }
577
578 return nfsm_chain_advance(nmc, new_offset);
579}
580
581/*
582 * nfsm_chain_get_opaque_pointer_f()
583 *
584 * Return a pointer to the next "len" bytes of contiguous data in
585 * the mbuf chain. If the next "len" bytes are not contiguous, we
586 * try to manipulate the mbuf chain so that it is.
587 *
588 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
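 *
 * For example, if a 28-byte file handle is split 20/8 across two mbufs,
 * the trailing 8 bytes are pulled forward (or the whole 28 bytes are
 * compacted into a freshly allocated mbuf) so the caller sees one
 * contiguous run; the mbuf chain itself may be modified in the process.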
589 */
590int
591nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr)
592{
593 mbuf_t mbcur, mb;
594 uint32_t padlen;
595 size_t mblen, cplen, need, left;
596 u_char *ptr;
597 int error = 0;
598
599 /* move to next mbuf with data */
600 while (nmc->nmc_mcur && (nmc->nmc_left == 0)) {
601 mb = mbuf_next(nmc->nmc_mcur);
602 nmc->nmc_mcur = mb;
603 if (!mb) {
604 break;
605 }
606 nmc->nmc_ptr = mbuf_data(mb);
607 nmc->nmc_left = mbuf_len(mb);
608 }
609 /* check if we've run out of data */
610 if (!nmc->nmc_mcur) {
611 return EBADRPC;
612 }
613
614 /* do we already have a contiguous buffer? */
615 if (nmc->nmc_left >= len) {
616 /* the returned pointer will be the current pointer */
617 *pptr = (u_char*)nmc->nmc_ptr;
618 error = nfsm_chain_advance(nmc, nfsm_rndup(len));
619 return error;
620 }
621
622 padlen = nfsm_rndup(len) - len;
623
624 /* we need (len - left) more bytes */
625 mbcur = nmc->nmc_mcur;
626 left = nmc->nmc_left;
627 need = len - left;
628
629 if (need > mbuf_trailingspace(mbcur)) {
630 /*
631 * The needed bytes won't fit in the current mbuf so we'll
632 * allocate a new mbuf to hold the contiguous range of data.
633 */
634 nfsm_mbuf_getcluster(error, &mb, len);
635 if (error) {
636 return error;
637 }
638 /* double check that this mbuf can hold all the data */
639 if (mbuf_maxlen(mb) < len) {
640 mbuf_free(mb);
641 return EOVERFLOW;
642 }
643
644 /* the returned pointer will be the new mbuf's data pointer */
645 *pptr = ptr = mbuf_data(mb);
646
647 /* copy "left" bytes to the new mbuf */
648 bcopy(nmc->nmc_ptr, ptr, left);
649 ptr += left;
650 mbuf_setlen(mb, left);
651
652 /* insert the new mbuf between the current and next mbufs */
653 error = mbuf_setnext(mb, mbuf_next(mbcur));
654 if (!error) {
655 error = mbuf_setnext(mbcur, mb);
656 }
657 if (error) {
658 mbuf_free(mb);
659 return error;
660 }
661
662 /* reduce current mbuf's length by "left" */
663 mbuf_setlen(mbcur, mbuf_len(mbcur) - left);
664
665 /*
666 * update nmc's state to point at the end of the mbuf
667 * where the needed data will be copied to.
668 */
669 nmc->nmc_mcur = mbcur = mb;
670 nmc->nmc_left = 0;
671 nmc->nmc_ptr = (caddr_t)ptr;
672 } else {
673 /* The rest of the data will fit in this mbuf. */
674
675 /* the returned pointer will be the current pointer */
676 *pptr = (u_char*)nmc->nmc_ptr;
677
678 /*
679 * update nmc's state to point at the end of the mbuf
680 * where the needed data will be copied to.
681 */
682 nmc->nmc_ptr += left;
683 nmc->nmc_left = 0;
684 }
685
686 /*
687 * move the next "need" bytes into the current
688 * mbuf from the mbufs that follow
689 */
690
691 /* extend current mbuf length */
692 mbuf_setlen(mbcur, mbuf_len(mbcur) + need);
693
694 /* mb follows mbufs we're copying/compacting data from */
695 mb = mbuf_next(mbcur);
696
697 while (need && mb) {
698 /* copy as much as we need/can */
699 ptr = mbuf_data(mb);
700 mblen = mbuf_len(mb);
701 cplen = MIN(mblen, need);
702 if (cplen) {
703 bcopy(ptr, nmc->nmc_ptr, cplen);
704 /*
705 * update the mbuf's pointer and length to reflect that
706 * the data was shifted to an earlier mbuf in the chain
707 */
708 error = mbuf_setdata(mb, ptr + cplen, mblen - cplen);
709 if (error) {
710 mbuf_setlen(mbcur, mbuf_len(mbcur) - need);
711 return error;
712 }
713 /* update pointer/need */
714 nmc->nmc_ptr += cplen;
715 need -= cplen;
716 }
717 /* if more needed, go to next mbuf */
718 if (need) {
719 mb = mbuf_next(mb);
720 }
721 }
722
723 /* did we run out of data in the mbuf chain? */
724 if (need) {
725 mbuf_setlen(mbcur, mbuf_len(mbcur) - need);
726 return EBADRPC;
727 }
728
729 /*
730 * update nmc's state to point after this contiguous data
731 *
732 * "mb" points to the last mbuf we copied data from so we
733 * just set nmc to point at whatever remains in that mbuf.
734 */
735 nmc->nmc_mcur = mb;
736 nmc->nmc_ptr = mbuf_data(mb);
737 nmc->nmc_left = mbuf_len(mb);
738
739 /* move past any padding */
740 if (padlen) {
741 error = nfsm_chain_advance(nmc, padlen);
742 }
743
744 return error;
745}
746
747/*
748 * nfsm_chain_get_opaque_f()
749 *
750 * Read the next "len" bytes in the chain into "buf".
751 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
752 */
753int
754nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, size_t len, u_char *buf)
755{
756 size_t cplen, padlen;
757 int error = 0;
758
759 padlen = nfsm_rndup(len) - len;
760
761 /* loop through mbufs copying all the data we need */
762 while (len && nmc->nmc_mcur) {
763 /* copy as much as we need/can */
764 cplen = MIN(nmc->nmc_left, len);
765 if (cplen) {
766 bcopy(nmc->nmc_ptr, buf, cplen);
767 nmc->nmc_ptr += cplen;
768 nmc->nmc_left -= cplen;
769 buf += cplen;
770 len -= cplen;
771 }
772 /* if more needed, go to next mbuf */
773 if (len) {
774 mbuf_t mb = mbuf_next(nmc->nmc_mcur);
775 nmc->nmc_mcur = mb;
776 nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
777 nmc->nmc_left = mb ? mbuf_len(mb) : 0;
778 }
779 }
780
781 /* did we run out of data in the mbuf chain? */
782 if (len) {
783 return EBADRPC;
784 }
785
786 if (padlen) {
787 nfsm_chain_adv(error, nmc, padlen);
788 }
789
790 return error;
791}
792
793/*
794 * nfsm_chain_get_uio()
795 *
796 * Read the next "len" bytes in the chain into the given uio.
797 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
798 */
799int
800nfsm_chain_get_uio(struct nfsm_chain *nmc, size_t len, uio_t uio)
801{
802 size_t cplen, padlen;
803 int error = 0;
804
805 padlen = nfsm_rndup(len) - len;
806
807 /* loop through mbufs copying all the data we need */
808 while (len && nmc->nmc_mcur) {
809 /* copy as much as we need/can */
810 cplen = MIN(nmc->nmc_left, len);
811 if (cplen) {
812 cplen = MIN(cplen, INT32_MAX);
813 error = uiomove(nmc->nmc_ptr, (int)cplen, uio);
814 if (error) {
815 return error;
816 }
817 nmc->nmc_ptr += cplen;
818 nmc->nmc_left -= cplen;
819 len -= cplen;
820 }
821 /* if more needed, go to next mbuf */
822 if (len) {
823 mbuf_t mb = mbuf_next(nmc->nmc_mcur);
824 nmc->nmc_mcur = mb;
825 nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
826 nmc->nmc_left = mb ? mbuf_len(mb) : 0;
827 }
828 }
829
830 /* did we run out of data in the mbuf chain? */
831 if (len) {
832 return EBADRPC;
833 }
834
835 if (padlen) {
836 nfsm_chain_adv(error, nmc, padlen);
837 }
838
839 return error;
840}
841
842#if CONFIG_NFS_CLIENT
843
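/*
 * nfsm_chain_add_string_nfc()
 *
 * Add a UTF-8 string to the chain after normalizing it to precomposed
 * (NFC) form; if normalization fails, the original bytes are sent as-is.
 * For example, a decomposed "e" + U+0301 combining accent goes out as the
 * single precomposed character U+00E9. Names that don't fit the 64-byte
 * stack buffer are normalized into a MAXPATHLEN buffer from ZV_NAMEI.
 */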
844int
845nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, size_t slen)
846{
847 uint8_t smallbuf[64];
848 uint8_t *nfcname = smallbuf;
849 size_t buflen = sizeof(smallbuf), nfclen;
850 int error;
851
852 error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
853 if (error == ENAMETOOLONG) {
854 buflen = MAXPATHLEN;
855 nfcname = zalloc(ZV_NAMEI);
856 error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
857 }
858
859 /* if we got an error, just use the original string */
860 if (error) {
861 nfsm_chain_add_string(error, nmc, s, slen);
862 } else {
863 nfsm_chain_add_string(error, nmc, nfcname, nfclen);
864 }
865
866 if (nfcname && (nfcname != smallbuf)) {
867 NFS_ZFREE(ZV_NAMEI, nfcname);
868 }
869 return error;
870}
871
872/*
873 * Add an NFSv2 "sattr" structure to an mbuf chain
874 */
875int
876nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_t szrdev)
877{
878 int error = 0;
879
880 nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type,
881 (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600)));
882 nfsm_chain_add_32(error, nmc,
883 VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1);
884 nfsm_chain_add_32(error, nmc,
885 VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1);
886 nfsm_chain_add_32(error, nmc, szrdev);
887 nfsm_chain_add_v2time(error, nmc,
888 VATTR_IS_ACTIVE(vap, va_access_time) ?
889 &vap->va_access_time : NULL);
890 nfsm_chain_add_v2time(error, nmc,
891 VATTR_IS_ACTIVE(vap, va_modify_time) ?
892 &vap->va_modify_time : NULL);
893
894 return error;
895}
896
897/*
898 * Add an NFSv3 "sattr" structure to an mbuf chain
899 */
900int
901nfsm_chain_add_v3sattr_f(
902 __unused struct nfsmount *nmp,
903 struct nfsm_chain *nmc,
904 struct vnode_attr *vap)
905{
906 int error = 0;
907
908 if (VATTR_IS_ACTIVE(vap, va_mode)) {
909 nfsm_chain_add_32(error, nmc, TRUE);
910 nfsm_chain_add_32(error, nmc, vap->va_mode);
911 } else {
912 nfsm_chain_add_32(error, nmc, FALSE);
913 }
914 if (VATTR_IS_ACTIVE(vap, va_uid)) {
915 nfsm_chain_add_32(error, nmc, TRUE);
916 nfsm_chain_add_32(error, nmc, vap->va_uid);
917 } else {
918 nfsm_chain_add_32(error, nmc, FALSE);
919 }
920 if (VATTR_IS_ACTIVE(vap, va_gid)) {
921 nfsm_chain_add_32(error, nmc, TRUE);
922 nfsm_chain_add_32(error, nmc, vap->va_gid);
923 } else {
924 nfsm_chain_add_32(error, nmc, FALSE);
925 }
926 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
927 nfsm_chain_add_32(error, nmc, TRUE);
928 nfsm_chain_add_64(error, nmc, vap->va_data_size);
929 } else {
930 nfsm_chain_add_32(error, nmc, FALSE);
931 }
932 if (vap->va_vaflags & VA_UTIMES_NULL) {
933 nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
934 nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
935 } else {
936 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
937 nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
938 nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_sec);
939 nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec);
940 } else {
941 nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
942 }
943 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
944 nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
945 nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_sec);
946 nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec);
947 } else {
948 nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
949 }
950 }
951
952
953 return error;
954}
955
956
957/*
958 * nfsm_chain_get_fh_attr()
959 *
960 * Get the file handle and attributes from an mbuf chain. (NFSv2/v3)
961 */
962int
963nfsm_chain_get_fh_attr(
964 struct nfsmount *nmp,
965 struct nfsm_chain *nmc,
966 nfsnode_t dnp,
967 vfs_context_t ctx,
968 int nfsvers,
969 uint64_t *xidp,
970 fhandle_t *fhp,
971 struct nfs_vattr *nvap)
972{
973 int error = 0, gotfh, gotattr;
974
975 gotfh = gotattr = 1;
976
977 if (nfsvers == NFS_VER3) { /* check for file handle */
978 nfsm_chain_get_32(error, nmc, gotfh);
979 }
980 if (!error && gotfh) { /* get file handle */
981 nfsm_chain_get_fh(error, nmc, nfsvers, fhp);
982 } else {
983 fhp->fh_len = 0;
984 }
985 if (nfsvers == NFS_VER3) { /* check for file attributes */
986 nfsm_chain_get_32(error, nmc, gotattr);
987 }
988 nfsmout_if(error);
989 if (gotattr) {
990 if (!gotfh) { /* skip attributes */
991 nfsm_chain_adv(error, nmc, NFSX_V3FATTR);
992 } else { /* get attributes */
993 error = nfs_parsefattr(nmp, nmc, nfsvers, nvap);
994 }
995 } else if (gotfh) {
996 /* we need valid attributes in order to call nfs_nget() */
997 if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp)) {
998 gotattr = 0;
999 fhp->fh_len = 0;
1000 }
1001 }
1002nfsmout:
1003 return error;
1004}
1005
1006/*
1007 * Get and process NFSv3 WCC data from an mbuf chain
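 *
 * The wcc_data consists of optional pre-operation attributes (size, mtime,
 * ctime) followed by optional post-operation attributes. Only the pre-op
 * mtime is kept here (for cache consistency checks); the size and ctime
 * words are skipped with nfsm_chain_adv(), and the post-op attributes are
 * handled by nfsm_chain_postop_attr_update_flag().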
1008 */
1009int
1010nfsm_chain_get_wcc_data_f(
1011 struct nfsm_chain *nmc,
1012 nfsnode_t np,
1013 struct timespec *premtime,
1014 int *newpostattr,
1015 u_int64_t *xidp)
1016{
1017 int error = 0;
1018 uint32_t flag = 0;
1019
1020 nfsm_chain_get_32(error, nmc, flag);
1021 if (!error && flag) {
1022 nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
1023 nfsm_chain_get_32(error, nmc, premtime->tv_sec);
1024 nfsm_chain_get_32(error, nmc, premtime->tv_nsec);
1025 nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
1026 } else {
1027 premtime->tv_sec = 0;
1028 premtime->tv_nsec = 0;
1029 }
1030 nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp);
1031
1032 return error;
1033}
1034
1035/*
1036 * Get the next RPC transaction ID (XID)
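 *
 * The 64-bit value returned in *xidp packs the 32-bit on-the-wire xid in
 * the low word and the wrap count in the high word (nfs_xid +
 * (nfs_xidwrap << 32)); nfsm_rpchead2() masks off the low 32 bits when
 * encoding the header.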
1037 */
1038void
1039nfs_get_xid(uint64_t *xidp)
1040{
1041 struct timeval tv;
1042
1043 lck_mtx_lock(&nfs_request_mutex);
1044 if (!nfs_xid) {
1045 /*
1046 * Derive initial xid from system time.
1047 *
1048 * Note: it's OK if this code inits nfs_xid to 0 (for example,
1049 * due to a broken clock) because we immediately increment it
1050 * and we guarantee to never use xid 0. So, nfs_xid should only
1051 * ever be 0 the first time this function is called.
1052 */
1053 microtime(&tv);
1054 nfs_xid = tv.tv_sec << 12;
1055 }
1056 if (++nfs_xid == 0) {
1057 /* Skip zero xid if it should ever happen. */
1058 nfs_xidwrap++;
1059 nfs_xid++;
1060 }
1061 *xidp = nfs_xid + (nfs_xidwrap << 32);
1062 lck_mtx_unlock(&nfs_request_mutex);
1063}
1064
1065/*
1066 * Build the RPC header and fill in the authorization info.
1067 * Returns the head of the mbuf list and the xid.
1068 */
1069
1070int
1071nfsm_rpchead(
1072 struct nfsreq *req,
1073 mbuf_t mrest,
1074 u_int64_t *xidp,
1075 mbuf_t *mreqp)
1076{
1077 struct nfsmount *nmp = req->r_nmp;
1078 int nfsvers = nmp->nm_vers;
1079 int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum);
1080
1081 return nfsm_rpchead2(nmp, nmp->nm_sotype, NFS_PROG, nfsvers, proc,
1082 req->r_auth, req->r_cred, req, mrest, xidp, mreqp);
1083}
1084
1085/*
1086 * get_auxiliary_groups: Gets the supplementary groups from a credential.
1087 *
1088 * IN: cred: credential to get the associated groups from.
1089 * OUT: groups: An array of gids of NGROUPS size.
1090 * IN: count: The number of groups to get, i.e., the number of groups the server supports.
1091 *
1092 * returns: The number of groups found.
1093 *
1094 * Just a wrapper around kauth_cred_getgroups to handle the case of a server supporting fewer
1095 * than NGROUPS groups.
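 *
 * For example, with a server limited to 8 groups and a credential whose
 * primary gid is duplicated at groups[0], the primary is dropped so that
 * 8 auxiliary gids can still be sent.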
1096 */
1097static size_t
1098get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], size_t count)
1099{
1100 gid_t pgid;
1101 size_t maxcount = count < NGROUPS ? count + 1 : NGROUPS;
1102 size_t i;
1103
1104 for (i = 0; i < NGROUPS; i++) {
1105 groups[i] = -2; /* Initialize to the nobody group */
1106 }
1107 (void)kauth_cred_getgroups(cred, groups, &maxcount);
1108 if (maxcount < 1) {
1109 return maxcount;
1110 }
1111
1112 /*
1113 * kauth_cred_getgroups returns the primary group followed by the
1114 * user's auxiliary groups. If the number of groups the server supports
1115 * is less than NGROUPS, then we drop the first group so that
1116 * we can send one more group over the wire.
1117 */
1118
1119
1120 if (count < NGROUPS) {
1121 pgid = kauth_cred_getgid(cred);
1122 if (pgid == groups[0]) {
1123 maxcount -= 1;
1124 for (i = 0; i < maxcount; i++) {
1125 groups[i] = groups[i + 1];
1126 }
1127 }
1128 }
1129
1130 return maxcount;
1131}
1132
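/*
 * nfsm_rpchead2()
 *
 * Encode the RPC call header in front of the request arguments ("mrest").
 * For an AUTH_SYS (sec=sys) request over TCP the words added below are,
 * in order: record mark, xid, message type (RPC_CALL), RPC version (2),
 * program, version, procedure, credential flavor (RPCAUTH_SYS) and length,
 * stamp, zero-length machine name, uid, gid, gid count and gids, then an
 * AUTH_NONE verifier of zero length. Kerberos flavors hand the credential
 * off to nfs_gss_clnt_cred_put() instead.
 */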
1133int
1134nfsm_rpchead2(__unused struct nfsmount *nmp, int sotype, int prog, int vers, int proc, int auth_type,
1135 kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp)
1136{
1137 mbuf_t mreq, mb;
1138 size_t i;
1139 int error, auth_len = 0, authsiz, reqlen;
1140 size_t headlen;
1141 struct nfsm_chain nmreq;
1142 gid_t grouplist[NGROUPS];
1143 size_t groupcount = 0;
1144
1145 /* calculate expected auth length */
1146 switch (auth_type) {
1147 case RPCAUTH_NONE:
1148 auth_len = 0;
1149 break;
1150 case RPCAUTH_SYS:
1151 {
1152 size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;
1153
1154 if (!cred) {
1155 return EINVAL;
1156 }
1157 groupcount = get_auxiliary_groups(cred, grouplist, count);
1158 auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
1159 break;
1160 }
1161#if CONFIG_NFS_GSS
1162 case RPCAUTH_KRB5:
1163 case RPCAUTH_KRB5I:
1164 case RPCAUTH_KRB5P:
1165 if (!req || !cred) {
1166 return EINVAL;
1167 }
1168 auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now
1169 break;
1170#endif /* CONFIG_NFS_GSS */
1171 default:
1172 return EINVAL;
1173 }
1174 authsiz = nfsm_rndup(auth_len);
1175
1176 /* allocate the packet */
1177 headlen = authsiz + 10 * NFSX_UNSIGNED;
1178 if (sotype == SOCK_STREAM) { /* also include room for any RPC Record Mark */
1179 headlen += NFSX_UNSIGNED;
1180 }
1181 if (headlen >= nfs_mbuf_minclsize) {
1182 error = mbuf_getpacket(MBUF_WAITOK, &mreq);
1183 } else {
1184 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
1185 if (!error) {
1186 if (headlen < nfs_mbuf_mhlen) {
1187 mbuf_align_32(mreq, headlen);
1188 } else {
1189 mbuf_align_32(mreq, 8 * NFSX_UNSIGNED);
1190 }
1191 }
1192 }
1193 if (error) {
1194 /* unable to allocate packet */
1195 /* XXX should we keep statistics for these errors? */
1196 return error;
1197 }
1198
1199 /*
1200 * If the caller gave us a non-zero XID then use it because
1201 * it may be a higher-level resend with a GSSAPI credential.
1202 * Otherwise, allocate a new one.
1203 */
1204 if (*xidp == 0) {
1205 nfs_get_xid(xidp);
1206 }
1207
1208 /* build the header(s) */
1209 nfsm_chain_init(&nmreq, mreq);
1210
1211 /* First, if it's a TCP stream insert space for an RPC record mark */
1212 if (sotype == SOCK_STREAM) {
1213 nfsm_chain_add_32(error, &nmreq, 0);
1214 }
1215
1216 /* Then the RPC header. */
1217 nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff));
1218 nfsm_chain_add_32(error, &nmreq, RPC_CALL);
1219 nfsm_chain_add_32(error, &nmreq, RPC_VER2);
1220 nfsm_chain_add_32(error, &nmreq, prog);
1221 nfsm_chain_add_32(error, &nmreq, vers);
1222 nfsm_chain_add_32(error, &nmreq, proc);
1223
1224#if CONFIG_NFS_GSS
1225add_cred:
1226#endif
1227 switch (auth_type) {
1228 case RPCAUTH_NONE:
1229 nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* auth */
1230 nfsm_chain_add_32(error, &nmreq, 0); /* length */
1231 nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */
1232 nfsm_chain_add_32(error, &nmreq, 0); /* length */
1233 nfsm_chain_build_done(error, &nmreq);
1234 /* Append the args mbufs */
1235 if (!error) {
1236 error = mbuf_setnext(nmreq.nmc_mcur, mrest);
1237 }
1238 break;
1239 case RPCAUTH_SYS: {
1240 nfsm_chain_add_32(error, &nmreq, RPCAUTH_SYS);
1241 nfsm_chain_add_32(error, &nmreq, authsiz);
1242 {
1243 nfsm_chain_add_32(error, &nmreq, 0); /* stamp */
1244 }
1245 nfsm_chain_add_32(error, &nmreq, 0); /* zero-length hostname */
1246 nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred)); /* UID */
1247 nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred)); /* GID */
1248 nfsm_chain_add_32(error, &nmreq, groupcount);/* additional GIDs */
1249 for (i = 0; i < groupcount; i++) {
1250 nfsm_chain_add_32(error, &nmreq, grouplist[i]);
1251 }
1252
1253 /* And the verifier... */
1254 nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */
1255 nfsm_chain_add_32(error, &nmreq, 0); /* length */
1256 nfsm_chain_build_done(error, &nmreq);
1257
1258 /* Append the args mbufs */
1259 if (!error) {
1260 error = mbuf_setnext(nmreq.nmc_mcur, mrest);
1261 }
1262 break;
1263 }
1264#if CONFIG_NFS_GSS
1265 case RPCAUTH_KRB5:
1266 case RPCAUTH_KRB5I:
1267 case RPCAUTH_KRB5P:
1268 error = nfs_gss_clnt_cred_put(req, &nmreq, mrest);
1269 if (error == ENEEDAUTH) {
1270 size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;
1271
1272 /*
1273 * Use sec=sys for this user
1274 */
1275 error = 0;
1276 req->r_auth = auth_type = RPCAUTH_SYS;
1277 groupcount = get_auxiliary_groups(cred, grouplist, count);
1278 auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
1279 authsiz = nfsm_rndup(auth_len);
1280 goto add_cred;
1281 }
1282 break;
1283#endif /* CONFIG_NFS_GSS */
1284 }
1285 ;
1286
1287 /* finish setting up the packet */
1288 if (!error) {
1289 error = mbuf_pkthdr_setrcvif(mreq, 0);
1290 }
1291
1292 if (error) {
1293 mbuf_freem(mreq);
1294 return error;
1295 }
1296
1297 /* Calculate the size of the request */
1298 reqlen = 0;
1299 for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) {
1300 reqlen += mbuf_len(mb);
1301 }
1302
1303 mbuf_pkthdr_setlen(mreq, reqlen);
1304
1305 /*
1306 * If the request goes on a TCP stream,
1307 * set its size in the RPC record mark.
1308 * The record mark count doesn't include itself
1309 * and the last fragment bit is set.
1310 */
1311 if (sotype == SOCK_STREAM) {
1312 nfsm_chain_set_recmark(error, &nmreq,
1313 (reqlen - NFSX_UNSIGNED) | 0x80000000);
1314 }
1315
1316 *mreqp = mreq;
1317 return 0;
1318}
1319
1320/*
1321 * Parse an NFS file attribute structure out of an mbuf chain.
1322 */
1323int
1324nfs_parsefattr(
1325 __unused struct nfsmount *nmp,
1326 struct nfsm_chain *nmc,
1327 int nfsvers,
1328 struct nfs_vattr *nvap)
1329{
1330 int error = 0;
1331 enum vtype vtype;
1332 nfstype nvtype;
1333 uint32_t vmode, val, val2;
1334 dev_t rdev;
1335
1336 val = val2 = 0;
1337 NVATTR_INIT(nvap);
1338
1339 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TYPE);
1340 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_MODE);
1341 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS);
1342 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER);
1343 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP);
1344 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SIZE);
1345 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SPACE_USED);
1346 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_RAWDEV);
1347 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FSID);
1348 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FILEID);
1349 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_ACCESS);
1350 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY);
1351 NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_METADATA);
1352
1353 nfsm_chain_get_32(error, nmc, nvtype);
1354 nfsm_chain_get_32(error, nmc, vmode);
1355 nfsmout_if(error);
1356
1357 if (nfsvers == NFS_VER3) {
1358 nvap->nva_type = vtype = nfstov_type(nvtype, nfsvers);
1359 } else {
1360 /*
1361 * The duplicate information returned in fa_type and fa_mode
1362 * is an ambiguity in the NFS version 2 protocol.
1363 *
1364 * VREG should be taken literally as a regular file. If a
1365 * server intends to return some type information differently
1366 * in the upper bits of the mode field (e.g. for sockets, or
1367 * FIFOs), NFSv2 mandates fa_type to be VNON. Anyway, we
1368 * leave the examination of the mode bits even in the VREG
1369 * case to avoid breakage for bogus servers, but we make sure
1370 * that there are actually type bits set in the upper part of
1371 * fa_mode (and failing that, trust the va_type field).
1372 *
1373 * NFSv3 cleared the issue, and requires fa_mode to not
1374 * contain any type information (while also introducing
1375 * sockets and FIFOs for fa_type).
1376 */
1377 vtype = nfstov_type(nvtype, nfsvers);
1378 if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) {
1379 vtype = IFTOVT(vmode);
1380 }
1381 nvap->nva_type = vtype;
1382 }
1383
1384 nvap->nva_mode = (vmode & 07777);
1385
1386 nfsm_chain_get_32(error, nmc, nvap->nva_nlink);
1387 nfsm_chain_get_32(error, nmc, nvap->nva_uid);
1388 nfsm_chain_get_32(error, nmc, nvap->nva_gid);
1389
1390 if (nfsvers == NFS_VER3) {
1391 nfsm_chain_get_64(error, nmc, nvap->nva_size);
1392 nfsm_chain_get_64(error, nmc, nvap->nva_bytes);
1393 nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata1);
1394 nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata2);
1395 nfsmout_if(error);
1396 nfsm_chain_get_64(error, nmc, nvap->nva_fsid.major);
1397 nvap->nva_fsid.minor = 0;
1398 nfsm_chain_get_64(error, nmc, nvap->nva_fileid);
1399 } else {
1400 nfsm_chain_get_32(error, nmc, nvap->nva_size);
1401 nfsm_chain_adv(error, nmc, NFSX_UNSIGNED);
1402 nfsm_chain_get_32(error, nmc, rdev);
1403 nfsmout_if(error);
1404 nvap->nva_rawdev.specdata1 = major(rdev);
1405 nvap->nva_rawdev.specdata2 = minor(rdev);
1406 nfsm_chain_get_32(error, nmc, val); /* blocks */
1407 nfsmout_if(error);
1408 nvap->nva_bytes = val * NFS_FABLKSIZE;
1409 nfsm_chain_get_32(error, nmc, val);
1410 nfsmout_if(error);
1411 nvap->nva_fsid.major = (uint64_t)val;
1412 nvap->nva_fsid.minor = 0;
1413 nfsm_chain_get_32(error, nmc, val);
1414 nfsmout_if(error);
1415 nvap->nva_fileid = (uint64_t)val;
1416 /* Really ugly NFSv2 kludge. */
1417 if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) {
1418 nvap->nva_type = VFIFO;
1419 }
1420 }
1421 nfsm_chain_get_time(error, nmc, nfsvers,
1422 nvap->nva_timesec[NFSTIME_ACCESS],
1423 nvap->nva_timensec[NFSTIME_ACCESS]);
1424 nfsm_chain_get_time(error, nmc, nfsvers,
1425 nvap->nva_timesec[NFSTIME_MODIFY],
1426 nvap->nva_timensec[NFSTIME_MODIFY]);
1427 nfsm_chain_get_time(error, nmc, nfsvers,
1428 nvap->nva_timesec[NFSTIME_CHANGE],
1429 nvap->nva_timensec[NFSTIME_CHANGE]);
1430
1431nfsmout:
1432 return error;
1433}
1434
1435
1436/*
1437 * Load the attribute cache (that lives in the nfsnode entry) with
1438 * the value pointed to by nvap, unless the file type in the attribute
1439 * cache doesn't match the file type in the nvap, in which case log a
1440 * warning and return ESTALE.
1441 *
1442 * If the dontshrink flag is set, then it's not safe to call ubc_setsize()
1443 * to shrink the size of the file.
1444 */
1445int
1446nfs_loadattrcache(
1447 nfsnode_t np,
1448 struct nfs_vattr *nvap,
1449 u_int64_t *xidp,
1450 int dontshrink)
1451{
1452 mount_t mp;
1453 vnode_t vp;
1454 struct timeval now;
1455 struct nfs_vattr *npnvap;
1456 int xattr = np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR;
1457 int referral = np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL;
1458 int aclbit, monitored, error = 0;
1459 kauth_acl_t acl;
1460 struct nfsmount *nmp;
1461 uint32_t events = np->n_events;
1462
1463 if (np->n_hflag & NHINIT) {
1464 vp = NULL;
1465 mp = np->n_mount;
1466 } else {
1467 vp = NFSTOV(np);
1468 mp = vnode_mount(vp);
1469 }
1470 monitored = vp ? vnode_ismonitored(vp) : 0;
1471
1472 FSDBG_TOP(527, np, vp, *xidp >> 32, *xidp);
1473
1474 if (!((nmp = VFSTONFS(mp)))) {
1475 FSDBG_BOT(527, ENXIO, 1, 0, *xidp);
1476 return ENXIO;
1477 }
1478
1479 if (*xidp < np->n_xid) {
1480 /*
1481 * We have already updated attributes with a response from
1482 * a later request. The attributes we have here are probably
1483 * stale so we drop them (just return). However, our
1484 * out-of-order receipt could be correct - if the requests were
1485 * processed out of order at the server. Given the uncertainty
1486 * we invalidate our cached attributes. *xidp is zeroed here
1487 * to indicate the attributes were dropped - only getattr
1488 * cares - it needs to retry the rpc.
1489 */
1490 NATTRINVALIDATE(np);
1491 FSDBG_BOT(527, 0, np, np->n_xid, *xidp);
1492 *xidp = 0;
1493 return 0;
1494 }
1495
1496 if (vp && (nvap->nva_type != vnode_vtype(vp))) {
1497 /*
1498 * The filehandle has changed type on us. This can be
1499 * caused by either the server not having unique filehandles
1500 * or because another client has removed the previous
1501 * filehandle and a new object (of a different type)
1502 * has been created with the same filehandle.
1503 *
1504 * We can't simply switch the type on the vnode because
1505 * there may be type-specific fields that need to be
1506 * cleaned up or set up.
1507 *
1508 * So, what should we do with this vnode?
1509 *
1510 * About the best we can do is log a warning and return
1511 * an error. ESTALE is about the closest error, but it
1512 * is a little strange that we come up with this error
1513 * internally instead of simply passing it through from
1514 * the server. Hopefully, the vnode will be reclaimed
1515 * soon so the filehandle can be reincarnated as the new
1516 * object type.
1517 */
1518 printf("nfs loadattrcache vnode changed type, was %d now %d\n",
1519 vnode_vtype(vp), nvap->nva_type);
1520 error = ESTALE;
1521 if (monitored) {
1522 events |= VNODE_EVENT_DELETE;
1523 }
1524 goto out;
1525 }
1526
1527 npnvap = &np->n_vattr;
1528
1529 /*
1530 * The ACL cache needs special handling because it is not
1531 * always updated. Save current ACL cache state so it can
1532 * be restored after copying the new attributes into place.
1533 */
1534 aclbit = NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL);
1535 acl = npnvap->nva_acl;
1536
1537 if (monitored) {
1538 /*
1539 * For monitored nodes, check for attribute changes that should generate events.
1540 */
1541 if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS) &&
1542 (nvap->nva_nlink != npnvap->nva_nlink)) {
1543 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_LINK;
1544 }
1545 if (events & VNODE_EVENT_PERMS) {
1546 /* no need to do all the checking if it's already set */;
1547 } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) &&
1548 (nvap->nva_mode != npnvap->nva_mode)) {
1549 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1550 } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
1551 (nvap->nva_uid != npnvap->nva_uid)) {
1552 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1553 } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
1554 (nvap->nva_gid != npnvap->nva_gid)) {
1555 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1556#if CONFIG_NFS4
1557 } else if (nmp->nm_vers >= NFS_VER4) {
1558 if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
1559 !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) {
1560 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1561 } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
1562 !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) {
1563 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1564 } else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) &&
1565 nvap->nva_acl && npnvap->nva_acl &&
1566 ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) ||
1567 bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) {
1568 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
1569 }
1570#endif
1571 }
1572 if (/* Oh, C... */
1573#if CONFIG_NFS4
1574 ((nmp->nm_vers >= NFS_VER4) && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_CHANGE) && (nvap->nva_change != npnvap->nva_change)) ||
1575#endif
1576 (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) &&
1577 ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) ||
1578 (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) {
1579 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
1580 }
1581 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_RAWDEV) &&
1582 ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) ||
1583 (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) {
1584 events |= VNODE_EVENT_ATTRIB;
1585 }
1586 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_FILEID) &&
1587 (nvap->nva_fileid != npnvap->nva_fileid)) {
1588 events |= VNODE_EVENT_ATTRIB;
1589 }
1590 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ARCHIVE) &&
1591 ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) {
1592 events |= VNODE_EVENT_ATTRIB;
1593 }
1594 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_HIDDEN) &&
1595 ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) {
1596 events |= VNODE_EVENT_ATTRIB;
1597 }
1598 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_CREATE) &&
1599 ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) ||
1600 (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) {
1601 events |= VNODE_EVENT_ATTRIB;
1602 }
1603 if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) &&
1604 ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) ||
1605 (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) {
1606 events |= VNODE_EVENT_ATTRIB;
1607 }
1608 }
1609
1610#if CONFIG_NFS4
1611 /* Copy the attributes to the attribute cache */
1612 if (nmp->nm_vers >= NFS_VER4 && npnvap->nva_flags & NFS_FFLAG_PARTIAL_WRITE) {
1613 /*
1614 * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
1615 * In such cases, we do not update the time stamp - but the requested attributes.
1616 */
1617 NFS_BITMAP_COPY_ATTR(nvap, npnvap, TYPE, type);
1618 NFS_BITMAP_COPY_ATTR(nvap, npnvap, CHANGE, change);
1619 NFS_BITMAP_COPY_ATTR(nvap, npnvap, SIZE, size);
1620 NFS_BITMAP_COPY_TIME(nvap, npnvap, METADATA, CHANGE);
1621 NFS_BITMAP_COPY_TIME(nvap, npnvap, MODIFY, MODIFY);
1622 } else
1623#endif /* CONFIG_NFS4 */
1624 {
1625 bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap));
1626 microuptime(&now);
1627 np->n_attrstamp = now.tv_sec;
1628 }
1629
1630 np->n_xid = *xidp;
1631 /* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... */
1632 if (vp && xattr) {
1633 nvap->nva_flags |= xattr;
1634 }
1635 if (vp && referral) {
1636 nvap->nva_flags |= referral;
1637 }
1638
1639 if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
1640 /* we're updating the ACL */
1641 if (nvap->nva_acl) {
1642 /* make a copy of the acl for the cache */
1643 npnvap->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
1644 if (npnvap->nva_acl) {
1645 bcopy(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
1646 } else {
1647 /* can't make a copy to cache, invalidate ACL cache */
1648 NFS_BITMAP_CLR(npnvap->nva_bitmap, NFS_FATTR_ACL);
1649 NACLINVALIDATE(np);
1650 aclbit = 0;
1651 }
1652 }
1653 if (acl) {
1654 kauth_acl_free(acl);
1655 acl = NULL;
1656 }
1657 }
1658 if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
1659 /* update the ACL timestamp */
1660 np->n_aclstamp = now.tv_sec;
1661 } else {
1662 /* we aren't updating the ACL, so restore original values */
1663 if (aclbit) {
1664 NFS_BITMAP_SET(npnvap->nva_bitmap, NFS_FATTR_ACL);
1665 }
1666 npnvap->nva_acl = acl;
1667 }
1668
1669#if CONFIG_TRIGGERS
1670#if CONFIG_NFS4
1671 /*
1672 * For NFSv4, if the fsid doesn't match the fsid for the mount, then
1673 * this node is for a different file system on the server. So we mark
1674 * this node as a trigger node that will trigger the mirror mount.
1675 */
1676 if ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_type == VDIR) &&
1677 ((np->n_vattr.nva_fsid.major != nmp->nm_fsid.major) ||
1678 (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) {
1679 np->n_vattr.nva_flags |= NFS_FFLAG_TRIGGER;
1680 }
1681#endif /* CONFIG_NFS4 */
1682#endif /* CONFIG_TRIGGERS */
1683
1684 if (!vp || (nvap->nva_type != VREG)) {
1685 np->n_size = nvap->nva_size;
1686 } else if (nvap->nva_size != np->n_size) {
1687 FSDBG(527, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
1688 if (!UBCINFOEXISTS(vp) || (dontshrink && (nvap->nva_size < np->n_size))) {
1689 /* asked not to shrink, so stick with current size */
1690 FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
1691 nvap->nva_size = np->n_size;
1692 NATTRINVALIDATE(np);
1693 } else if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
1694 /* if we've modified, stick with larger size */
1695 FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
1696 nvap->nva_size = np->n_size;
1697 npnvap->nva_size = np->n_size;
1698 } else {
1699 /*
1700 * n_size is protected by the data lock, so we need to
1701 * defer updating it until it's safe. We save the new size
1702 * and set a flag and it'll get updated the next time we get/drop
1703 * the data lock or the next time we do a getattr.
1704 */
1705 np->n_newsize = nvap->nva_size;
1706 SET(np->n_flag, NUPDATESIZE);
1707 if (monitored) {
1708 events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_EXTEND;
1709 }
1710 }
1711 }
1712
1713 if (np->n_flag & NCHG) {
1714 if (np->n_flag & NACC) {
1715 nvap->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
1716 nvap->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
1717 }
1718 if (np->n_flag & NUPD) {
1719 nvap->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
1720 nvap->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
1721 }
1722 }
1723
1724out:
1725 if (monitored && events) {
1726 nfs_vnode_notify(np, events);
1727 }
1728 FSDBG_BOT(527, error, np, np->n_size, *xidp);
1729 return error;
1730}
1731
1732/*
1733 * Calculate the attribute timeout based on
1734 * how recently the file has been modified.
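 *
 * With no delegation and no pending modifications, the timeout is roughly
 * one tenth of the file's age since last modification: a regular file last
 * modified 5 minutes ago gets about a 30 second timeout, clamped to the
 * mount's acregmin/acregmax (acdirmin/acdirmax for directories).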
1735 */
1736long
1737nfs_attrcachetimeout(nfsnode_t np)
1738{
1739 struct nfsmount *nmp;
1740 struct timeval now;
1741 int isdir;
1742 long timeo;
1743
1744 nmp = NFSTONMP(np);
1745 if (nfs_mount_gone(nmp)) {
1746 return 0;
1747 }
1748
1749 isdir = vnode_isdir(NFSTOV(np));
1750#if CONFIG_NFS4
1751 if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
1752 /* If we have a delegation, we always use the max timeout. */
1753 timeo = isdir ? nmp->nm_acdirmax : nmp->nm_acregmax;
1754 } else
1755#endif
1756 if ((np)->n_flag & NMODIFIED) {
1757 /* If we have modifications, we always use the min timeout. */
1758 timeo = isdir ? nmp->nm_acdirmin : nmp->nm_acregmin;
1759 } else {
1760 /* Otherwise, we base the timeout on how old the file seems. */
1761 /* Note that if the client and server clocks are way out of sync, */
1762 /* timeout will probably get clamped to a min or max value */
1763 microtime(&now);
1764 timeo = (now.tv_sec - (np)->n_vattr.nva_timesec[NFSTIME_MODIFY]) / 10;
1765 if (isdir) {
1766 if (timeo < nmp->nm_acdirmin) {
1767 timeo = nmp->nm_acdirmin;
1768 } else if (timeo > nmp->nm_acdirmax) {
1769 timeo = nmp->nm_acdirmax;
1770 }
1771 } else {
1772 if (timeo < nmp->nm_acregmin) {
1773 timeo = nmp->nm_acregmin;
1774 } else if (timeo > nmp->nm_acregmax) {
1775 timeo = nmp->nm_acregmax;
1776 }
1777 }
1778 }
1779
1780 return timeo;
1781}
1782
1783/*
1784 * Check the attribute cache time stamp.
1785 * If the cache is valid, copy contents to *nvaper and return 0
1786 * otherwise return an error.
1787 * Must be called with the node locked.
1788 */
1789int
1790nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags)
1791{
1792 struct nfs_vattr *nvap;
1793 struct timeval nowup;
1794 long timeo;
1795 struct nfsmount *nmp;
1796
1797 /* Check if the attributes are valid. */
1798 if (!NATTRVALID(np) || ((flags & NGA_ACL) && !NACLVALID(np))) {
1799 FSDBG(528, np, 0, 0xffffff01, ENOENT);
1800 OSAddAtomic64(1, &nfsstats.attrcache_misses);
1801 return ENOENT;
1802 }
1803
1804 nmp = NFSTONMP(np);
1805 if (nfs_mount_gone(nmp)) {
1806 return ENXIO;
1807 }
1808 /*
1809 * Verify the cached attributes haven't timed out.
1810 * If the server isn't responding, skip the check
1811 * and return cached attributes.
1812 */
1813 if (!nfs_use_cache(nmp)) {
1814 microuptime(&nowup);
1815 if (np->n_attrstamp > nowup.tv_sec) {
1816 printf("NFS: Attribute time stamp is in the future by %ld seconds. Invalidating cache\n",
1817 np->n_attrstamp - nowup.tv_sec);
1818 NATTRINVALIDATE(np);
1819 NACCESSINVALIDATE(np);
1820 return ENOENT;
1821 }
1822 timeo = nfs_attrcachetimeout(np);
1823 if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
1824 FSDBG(528, np, 0, 0xffffff02, ENOENT);
1825 OSAddAtomic64(1, &nfsstats.attrcache_misses);
1826 return ENOENT;
1827 }
1828 if ((flags & NGA_ACL) && ((nowup.tv_sec - np->n_aclstamp) >= timeo)) {
1829 FSDBG(528, np, 0, 0xffffff02, ENOENT);
1830 OSAddAtomic64(1, &nfsstats.attrcache_misses);
1831 return ENOENT;
1832 }
1833 }
1834
1835 nvap = &np->n_vattr;
1836 FSDBG(528, np, nvap->nva_size, np->n_size, 0xcace);
1837 OSAddAtomic64(1, &nfsstats.attrcache_hits);
1838
1839 if (nvap->nva_type != VREG) {
1840 np->n_size = nvap->nva_size;
1841 } else if (nvap->nva_size != np->n_size) {
1842 FSDBG(528, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
1843 if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
1844 /* if we've modified, stick with larger size */
1845 nvap->nva_size = np->n_size;
1846 } else {
1847 /*
1848 * n_size is protected by the data lock, so we need to
1849 * defer updating it until it's safe. We save the new size
1850 * and set a flag and it'll get updated the next time we get/drop
1851 * the data lock or the next time we do a getattr.
1852 */
1853 np->n_newsize = nvap->nva_size;
1854 SET(np->n_flag, NUPDATESIZE);
1855 }
1856 }
1857
1858 bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
1859 if (np->n_flag & NCHG) {
1860 if (np->n_flag & NACC) {
1861 nvaper->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
1862 nvaper->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
1863 }
1864 if (np->n_flag & NUPD) {
1865 nvaper->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
1866 nvaper->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
1867 }
1868 }
1869 if (nvap->nva_acl) {
1870 if (flags & NGA_ACL) {
1871 nvaper->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
1872 if (!nvaper->nva_acl) {
1873 return ENOMEM;
1874 }
1875 bcopy(nvap->nva_acl, nvaper->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
1876 } else {
1877 nvaper->nva_acl = NULL;
1878 }
1879 }
1880 return 0;
1881}
1882
1883/*
1884 * When creating file system objects:
1885 * Don't bother setting UID if it's the same as the credential performing the create.
1886 * Don't bother setting GID if it's the same as the directory or credential.
1887 */
1888void
1889nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
1890{
1891 if (VATTR_IS_ACTIVE(vap, va_uid)) {
1892 if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid) {
1893 VATTR_CLEAR_ACTIVE(vap, va_uid);
1894 VATTR_CLEAR_ACTIVE(vap, va_uuuid);
1895 }
1896 }
1897 if (VATTR_IS_ACTIVE(vap, va_gid)) {
1898 if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
1899 (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid)) {
1900 VATTR_CLEAR_ACTIVE(vap, va_gid);
1901 VATTR_CLEAR_ACTIVE(vap, va_guuid);
1902 }
1903 }
1904}
1905
1906/*
1907 * Convert a universal address string to a sockaddr structure.
1908 *
1909 * Universal addresses can be in the following formats:
1910 *
1911 * d = decimal (IPv4)
1912 * x = hexadecimal (IPv6)
1913 * p = port (decimal)
1914 *
1915 * d.d.d.d
1916 * d.d.d.d.p.p
1917 * x:x:x:x:x:x:x:x
1918 * x:x:x:x:x:x:x:x.p.p
1919 * x:x:x:x:x:x:d.d.d.d
1920 * x:x:x:x:x:x:d.d.d.d.p.p
1921 *
1922 * IPv6 strings can also have a series of zeroes elided
1923 * IPv6 strings can also have a %scope suffix at the end (after any port)
1924 *
1925 * rules & exceptions:
1926 * - value before : is hex
1927 * - value before . is dec
1928 * - once . hit, all values are dec
1929 * - hex+port case means value before first dot is actually hex
1930 * - . is always preceded by digits except if last hex was double-colon
1931 *
1932 * scan, converting #s to bytes
1933 * first time a . is encountered, scan the rest to count them.
1934 * 2 dots = just port
1935 * 3 dots = just IPv4 no port
1936 * 5 dots = IPv4 and port
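 *
 * Examples (illustrative): "10.1.2.3.8.1" is IPv4 10.1.2.3 with port
 * 8 * 256 + 1 = 2049 (the trailing p.p values are the high and low port
 * bytes); "::1.8.1" is IPv6 ::1 with the same port; "fe80::1%en0" has a
 * scope suffix and no port.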
1937 */
1938
1939#define IS_DIGIT(C) \
1940 (((C) >= '0') && ((C) <= '9'))
1941
1942#define IS_XDIGIT(C) \
1943 (IS_DIGIT(C) || \
1944 (((C) >= 'A') && ((C) <= 'F')) || \
1945 (((C) >= 'a') && ((C) <= 'f')))
1946
1947int
1948nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr)
1949{
1950 const char *p, *pd; /* pointers to current character in scan */
1951 const char *pnum; /* pointer to current number to decode */
1952 const char *pscope; /* pointer to IPv6 scope ID */
1953 uint8_t a[18]; /* octet array to store address bytes */
1954 int i; /* index of next octet to decode */
1955 int dci; /* index of octet to insert double-colon zeroes */
1956 int dcount, xdcount; /* count of digits in current number */
1957 int needmore; /* set when we know we need more input (e.g. after colon, period) */
1958 int dots; /* # of dots */
1959 int hex; /* contains hex values */
1960 unsigned long val; /* decoded value */
1961 int s; /* index used for sliding array to insert elided zeroes */
1962
1963 /* AF_LOCAL addresses are paths that start with '/' or are empty */
1964 if (*uaddr == '/' || *uaddr == '\0') { /* AF_LOCAL address */
1965 struct sockaddr_un *sun = (struct sockaddr_un *)addr;
1966 sun->sun_family = AF_LOCAL;
1967 sun->sun_len = sizeof(struct sockaddr_un);
1968 strlcpy(sun->sun_path, uaddr, sizeof(sun->sun_path));
1969
1970 return 1;
1971 }
1972
1973#define HEXVALUE 0
1974#define DECIMALVALUE 1
1975
1976#define GET(TYPE) \
1977 do { \
1978 if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \
1979 return (0); \
1980 if (((TYPE) == DECIMALVALUE) && xdcount) \
1981 return (0); \
1982 val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 10 : 16); \
1983 if (((TYPE) == DECIMALVALUE) && (val >= 256)) \
1984 return (0); \
1985 /* check if there is room left in the array */ \
1986 if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \
1987 return (0); \
1988 if ((TYPE) == HEXVALUE) \
1989 a[i++] = ((val >> 8) & 0xff); \
1990 a[i++] = (val & 0xff); \
1991 } while (0)
1992
1993 hex = 0;
1994 dots = 0;
1995 dci = -1;
1996 i = dcount = xdcount = 0;
1997 pnum = p = uaddr;
1998 pscope = NULL;
1999 needmore = 1;
2000 if ((*p == ':') && (*++p != ':')) { /* if it starts with colon, gotta be a double */
2001 return 0;
2002 }
2003
2004 while (*p) {
2005 if (IS_XDIGIT(*p)) {
2006 dcount++;
2007 if (!IS_DIGIT(*p)) {
2008 xdcount++;
2009 }
2010 needmore = 0;
2011 p++;
2012 } else if (*p == '.') {
2013 /* rest is decimal IPv4 dotted quad and/or port */
2014 if (!dots) {
2015 /* this is the first, so count them */
2016 for (pd = p; *pd; pd++) {
2017 if (*pd == '.') {
2018 if (++dots > 5) {
2019 return 0;
2020 }
2021 } else if (hex && (*pd == '%')) {
2022 break;
2023 } else if ((*pd < '0') || (*pd > '9')) {
2024 return 0;
2025 }
2026 }
2027 if ((dots != 2) && (dots != 3) && (dots != 5)) {
2028 return 0;
2029 }
2030 if (hex && (dots == 2)) { /* hex+port */
2031 if (!dcount && needmore) {
2032 return 0;
2033 }
2034 if (dcount) { /* last hex may be elided zero */
2035 GET(HEXVALUE);
2036 }
2037 } else {
2038 GET(DECIMALVALUE);
2039 }
2040 } else {
2041 GET(DECIMALVALUE);
2042 }
2043 dcount = xdcount = 0;
2044 needmore = 1;
2045 pnum = ++p;
2046 } else if (*p == ':') {
2047 hex = 1;
2048 if (dots) {
2049 return 0;
2050 }
2051 if (!dcount) { /* missing number, probably double colon */
2052 if (dci >= 0) { /* can only have one double colon */
2053 return 0;
2054 }
2055 dci = i;
2056 needmore = 0;
2057 } else {
2058 GET(HEXVALUE);
2059 dcount = xdcount = 0;
2060 needmore = 1;
2061 }
2062 pnum = ++p;
2063 } else if (*p == '%') { /* scope ID delimiter */
2064 if (!hex) {
2065 return 0;
2066 }
2067 p++;
2068 pscope = p;
2069 break;
2070 } else { /* unexpected character */
2071 return 0;
2072 }
2073 }
2074 if (needmore && !dcount) {
2075 return 0;
2076 }
2077 if (dcount) { /* decode trailing number */
2078 GET(dots ? DECIMALVALUE : HEXVALUE);
2079 }
2080 if (dci >= 0) { /* got a double-colon at i, need to insert a range of zeroes */
2081 /* if we got a port, slide to end of array */
2082 /* otherwise, slide to end of address (non-port) values */
2083 int end = ((dots == 2) || (dots == 5)) ? sizeof(a) : (sizeof(a) - 2);
2084 if (i % 2) { /* length of zero range must be multiple of 2 */
2085 return 0;
2086 }
2087 if (i >= end) { /* no room? */
2088 return 0;
2089 }
2090 /* slide (i-dci) numbers up from index dci */
2091 for (s = 0; s < (i - dci); s++) {
2092 a[end - 1 - s] = a[i - 1 - s];
2093 }
2094 /* zero (end-i) numbers at index dci */
2095 for (s = 0; s < (end - i); s++) {
2096 a[dci + s] = 0;
2097 }
2098 i = end;
2099 }
2100
2101 /* copy out resulting socket address */
2102 if (hex) {
2103 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)addr;
2104 if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a) - 2)))) {
2105 return 0;
2106 }
2107 if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) {
2108 return 0;
2109 }
2110 bzero(sin6, sizeof(struct sockaddr_in6));
2111 sin6->sin6_len = sizeof(struct sockaddr_in6);
2112 sin6->sin6_family = AF_INET6;
2113 bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr));
2114 if ((dots == 5) || (dots == 2)) {
2115 sin6->sin6_port = htons((in_port_t)((a[16] << 8) | a[17]));
2116 }
2117 if (pscope) {
2118 for (p = pscope; IS_DIGIT(*p); p++) {
2119 ;
2120 }
2121 if (*p && !IS_DIGIT(*p)) { /* name */
2122 ifnet_t interface = NULL;
2123 if (ifnet_find_by_name(pscope, &interface) == 0) {
2124 sin6->sin6_scope_id = ifnet_index(interface);
2125 }
2126 if (interface) {
2127 ifnet_release(interface);
2128 }
2129 } else { /* decimal number */
2130 sin6->sin6_scope_id = (uint32_t)strtoul(pscope, NULL, 10);
2131 }
2132 /* XXX should we also embed scope id for linklocal? */
2133 }
2134 } else {
2135 struct sockaddr_in *sin = (struct sockaddr_in*)addr;
2136 if ((dots != 3) && (dots != 5)) {
2137 return 0;
2138 }
2139 if ((dots == 3) && (i != 4)) {
2140 return 0;
2141 }
2142 if ((dots == 5) && (i != 6)) {
2143 return 0;
2144 }
2145 bzero(sin, sizeof(struct sockaddr_in));
2146 sin->sin_len = sizeof(struct sockaddr_in);
2147 sin->sin_family = AF_INET;
2148 bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr));
2149 if (dots == 5) {
2150 sin->sin_port = htons((in_port_t)((a[4] << 8) | a[5]));
2151 }
2152 }
2153 return 1;
2154}
2155
2156
2157/* NFS Client debugging support */
2158uint32_t nfs_debug_ctl;
2159
2160#include <libkern/libkern.h>
2161#include <stdarg.h>
2162
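/* Print a debug message only if the given facility/level is enabled (see NFS_IS_DBG / nfs_debug_ctl). */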
2163void
2164nfs_printf(unsigned int facility, unsigned int level, const char *fmt, ...)
2165{
2166 va_list ap;
2167
2168 if (NFS_IS_DBG(facility, level)) {
2169 va_start(ap, fmt);
2170 vprintf(fmt, ap);
2171 va_end(ap);
2172 }
2173}
2174
2175
2176#define DISPLAYLEN 16
2177
2178static bool
2179isprint(int ch)
2180{
2181 return ch >= 0x20 && ch <= 0x7e;
2182}
2183
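/*
 * Dump a buffer as lines of up to DISPLAYLEN (16) hex byte values followed
 * by their printable-ASCII rendering (non-printable bytes shown as '.').
 */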
2184static void
2185hexdump(void *data, size_t len)
2186{
2187 size_t i, j;
2188 unsigned char *d = data;
2189 char *p, disbuf[3 * DISPLAYLEN + 1];
2190
2191 for (i = 0; i < len; i += DISPLAYLEN) {
2192 for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
2193 snprintf(p, 4, "%2.2x ", d[i + j]);
2194 }
2195 for (; j < DISPLAYLEN; j++, p += 3) {
2196 snprintf(p, 4, " ");
2197 }
2198 printf("%s ", disbuf);
2199 for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p++) {
2200 snprintf(p, 2, "%c", isprint(d[i + j]) ? d[i + j] : '.');
2201 }
2202 printf("%s\n", disbuf);
2203 }
2204}
2205
2206void
2207nfs_dump_mbuf(const char *func, int lineno, const char *msg, mbuf_t mb)
2208{
2209 mbuf_t m;
2210
2211 printf("%s:%d %s\n", func, lineno, msg);
2212 for (m = mb; m; m = mbuf_next(m)) {
2213 hexdump(mbuf_data(m), mbuf_len(m));
2214 }
2215}
2216
2217/* Has a mount gone away? */
2218int
2219nfs_mount_gone(struct nfsmount *nmp)
2220{
2221 return !nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD));
2222}
2223
2224/*
2225 * Return some of the more significant mount options
2226 * as a string, e.g. "ro,hard,intr,tcp,vers=3,sec=krb5,deadtimeout=0"
2227 */
2228int
2229nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen)
2230{
2231 int c;
2232
2233 c = snprintf(buf, buflen, "%s,%s,%s,%s,vers=%d,sec=%s,%sdeadtimeout=%d",
2234 (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw",
2235 NMFLAG(nmp, SOFT) ? "soft" : "hard",
2236 NMFLAG(nmp, INTR) ? "intr" : "nointr",
2237 nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp",
2238 nmp->nm_vers,
2239 nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" :
2240 nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" :
2241 nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" :
2242 nmp->nm_auth == RPCAUTH_SYS ? "sys" : "none",
2243 nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," :
2244 nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," :
2245 nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "",
2246 nmp->nm_deadtimeout);
2247
2248 return c > buflen ? ENOMEM : 0;
2249}
2250
2251#endif /* CONFIG_NFS_CLIENT */
2252
2253/*
2254 * Schedule a callout thread to run an NFS timer function
2255 * interval milliseconds in the future.
2256 */
2257void
2258nfs_interval_timer_start(thread_call_t call, time_t interval)
2259{
2260 uint64_t deadline;
2261
2262 clock_interval_to_deadline((int)interval, 1000 * 1000, &deadline);
2263 thread_call_enter_delayed(call, deadline);
2264}
2265
2266
2267#if CONFIG_NFS_SERVER
2268
2269int nfsrv_cmp_secflavs(struct nfs_sec *, struct nfs_sec *);
2270int nfsrv_hang_addrlist(struct nfs_export *, struct user_nfs_export_args *);
2271int nfsrv_free_netopt(struct radix_node *, void *);
2272int nfsrv_free_addrlist(struct nfs_export *, struct user_nfs_export_args *);
2273struct nfs_export_options *nfsrv_export_lookup(struct nfs_export *, mbuf_t);
2274struct nfs_export *nfsrv_fhtoexport(struct nfs_filehandle *);
2275struct nfs_user_stat_node *nfsrv_get_user_stat_node(struct nfs_active_user_list *, struct sockaddr *, uid_t);
2276void nfsrv_init_user_list(struct nfs_active_user_list *);
2277void nfsrv_free_user_list(struct nfs_active_user_list *);
2278
2279/*
2280 * add NFSv3 WCC (weak cache consistency) data to an mbuf chain
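 * (RFC 1813 wcc_data: a boolean-discriminated pre_op_attr carrying size,
 * mtime and ctime when present, followed by a post_op_attr)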
2281 */
2282int
2283nfsm_chain_add_wcc_data_f(
2284 struct nfsrv_descript *nd,
2285 struct nfsm_chain *nmc,
2286 int preattrerr,
2287 struct vnode_attr *prevap,
2288 int postattrerr,
2289 struct vnode_attr *postvap)
2290{
2291 int error = 0;
2292
2293 if (preattrerr) {
2294 nfsm_chain_add_32(error, nmc, FALSE);
2295 } else {
2296 nfsm_chain_add_32(error, nmc, TRUE);
2297 nfsm_chain_add_64(error, nmc, prevap->va_data_size);
2298 nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_modify_time);
2299 nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_change_time);
2300 }
2301 nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap);
2302
2303 return error;
2304}
2305
2306/*
2307 * Extract a lookup path from the given mbufs and store it in
2308 * a newly allocated buffer saved in the given nameidata structure.
2309 */
2310int
2311nfsm_chain_get_path_namei(
2312 struct nfsm_chain *nmc,
2313 uint32_t len,
2314 struct nameidata *nip)
2315{
2316 struct componentname *cnp = &nip->ni_cnd;
2317 int error = 0;
2318 char *cp;
2319
2320 if (len > (MAXPATHLEN - 1)) {
2321 return ENAMETOOLONG;
2322 }
2323
2324 /*
2325 * Get a buffer for the name to be translated, and copy the
2326 * name into the buffer.
2327 */
2328 cnp->cn_pnbuf = zalloc(ZV_NAMEI);
2329 cnp->cn_pnlen = MAXPATHLEN;
2330 cnp->cn_flags |= HASBUF;
2331
2332 /* Copy the name from the mbuf list to the string */
2333 cp = cnp->cn_pnbuf;
2334 nfsm_chain_get_opaque(error, nmc, len, cp);
2335 if (error) {
2336 goto out;
2337 }
2338 cnp->cn_pnbuf[len] = '\0';
2339
2340 /* sanity check the string */
2341 if ((strlen(cp) != len) || strchr(cp, '/')) {
2342 error = EACCES;
2343 }
2344out:
2345 if (error) {
2346 if (cnp->cn_pnbuf) {
2347 NFS_ZFREE(ZV_NAMEI, cnp->cn_pnbuf);
2348 }
2349 cnp->cn_flags &= ~HASBUF;
2350 } else {
2351 nip->ni_pathlen = len;
2352 }
2353 return error;
2354}
2355
2356/*
2357 * Set up nameidata for a lookup() call and do it.
2358 */
2359int
2360nfsrv_namei(
2361 struct nfsrv_descript *nd,
2362 vfs_context_t ctx,
2363 struct nameidata *nip,
2364 struct nfs_filehandle *nfhp,
2365 vnode_t *retdirp,
2366 struct nfs_export **nxp,
2367 struct nfs_export_options **nxop)
2368{
2369 vnode_t dp;
2370 int error;
2371 struct componentname *cnp = &nip->ni_cnd;
2372 uint32_t cnflags;
2373 char *tmppn;
2374
2375 *retdirp = NULL;
2376
2377 /*
2378 * Extract and set starting directory.
2379 */
2380 error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop);
2381 if (error) {
2382 goto out;
2383 }
2384 error = nfsrv_credcheck(nd, ctx, *nxp, *nxop);
2385 if (error || (vnode_vtype(dp) != VDIR)) {
2386 vnode_put(dp);
2387 error = ENOTDIR;
2388 goto out;
2389 }
2390 *retdirp = dp;
2391
2392 nip->ni_cnd.cn_context = ctx;
2393
2394 if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) {
2395 cnp->cn_flags |= RDONLY;
2396 }
2397
2398 cnp->cn_flags |= NOCROSSMOUNT;
2399 cnp->cn_nameptr = cnp->cn_pnbuf;
2400 nip->ni_usedvp = nip->ni_startdir = dp;
2401 nip->ni_rootdir = rootvnode;
2402
2403 /*
2404 * And call lookup() to do the real work
2405 */
2406 cnflags = nip->ni_cnd.cn_flags; /* store in case we have to restore */
2407 while ((error = lookup(nip)) == ERECYCLE) {
2408 nip->ni_cnd.cn_flags = cnflags;
2409 cnp->cn_nameptr = cnp->cn_pnbuf;
2410 nip->ni_usedvp = nip->ni_dvp = nip->ni_startdir = dp;
2411 }
2412 if (error) {
2413 goto out;
2414 }
2415
2416 /* Check for encountering a symbolic link */
2417 if (cnp->cn_flags & ISSYMLINK) {
2418 if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) {
2419 vnode_put(nip->ni_dvp);
2420 }
2421 if (nip->ni_vp) {
2422 vnode_put(nip->ni_vp);
2423 nip->ni_vp = NULL;
2424 }
2425 error = EINVAL;
2426 }
2427out:
2428 if (error) {
2429 tmppn = cnp->cn_pnbuf;
2430 cnp->cn_pnbuf = NULL;
2431 cnp->cn_flags &= ~HASBUF;
2432 NFS_ZFREE(ZV_NAMEI, tmppn);
2433 }
2434 return error;
2435}
2436
2437/*
2438 * A fiddled version of m_adj() that ensures null fill to a 4-byte
2439 * boundary and only trims off the back end
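 * (len is the number of bytes to trim from the tail of the chain; nul is the
 * count of bytes at the new tail to overwrite with '\0' padding)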
2440 */
2441void
2442nfsm_adj(mbuf_t mp, int len, int nul)
2443{
2444 mbuf_t m, mnext;
2445 int count, i;
2446 long mlen;
2447 char *cp;
2448
2449 /*
2450 * Trim from tail. Scan the mbuf chain,
2451 * calculating its length and finding the last mbuf.
2452 * If the adjustment only affects this mbuf, then just
2453 * adjust and return. Otherwise, rescan and truncate
2454 * after the remaining size.
2455 */
2456 count = 0;
2457 m = mp;
2458 for (;;) {
2459 mlen = mbuf_len(m);
2460 count += mlen;
2461 mnext = mbuf_next(m);
2462 if (mnext == NULL) {
2463 break;
2464 }
2465 m = mnext;
2466 }
2467 if (mlen > len) {
2468 mlen -= len;
2469 mbuf_setlen(m, mlen);
2470 if (nul > 0) {
2471 cp = (caddr_t)mbuf_data(m) + mlen - nul;
2472 for (i = 0; i < nul; i++) {
2473 *cp++ = '\0';
2474 }
2475 }
2476 return;
2477 }
2478 count -= len;
2479 if (count < 0) {
2480 count = 0;
2481 }
2482 /*
2483 * Correct length for chain is "count".
2484 * Find the mbuf with last data, adjust its length,
2485 * and toss data from remaining mbufs on chain.
2486 */
2487 for (m = mp; m; m = mbuf_next(m)) {
2488 mlen = mbuf_len(m);
2489 if (mlen >= count) {
2490 mlen = count;
2491 mbuf_setlen(m, count);
2492 if (nul > 0) {
2493 cp = (caddr_t)mbuf_data(m) + mlen - nul;
2494 for (i = 0; i < nul; i++) {
2495 *cp++ = '\0';
2496 }
2497 }
2498 break;
2499 }
2500 count -= mlen;
2501 }
2502 for (m = mbuf_next(m); m; m = mbuf_next(m)) {
2503 mbuf_setlen(m, 0);
2504 }
2505}
2506
2507/*
2508 * Trim the header out of the mbuf list and trim off any trailing
2509 * junk so that the mbuf list has only the write data.
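 * (len is the expected number of write-data bytes; if mlen is non-NULL it
 * returns the number of data bytes actually found in the chain)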
2510 */
2511int
2512nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
2513{
2514 int cnt = 0;
2515 long dlen, adjust;
2516 caddr_t data;
2517 mbuf_t m;
2518
2519 if (mlen) {
2520 *mlen = 0;
2521 }
2522
2523 /* trim header */
2524 for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) {
2525 mbuf_setlen(m, 0);
2526 }
2527 if (!m) {
2528 return EIO;
2529 }
2530
2531 /* trim current mbuf */
2532 data = mbuf_data(m);
2533 dlen = mbuf_len(m);
2534 adjust = nmc->nmc_ptr - data;
2535 dlen -= adjust;
2536 if ((dlen > 0) && (adjust > 0)) {
2537 if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) {
2538 return EIO;
2539 }
2540 } else {
2541 mbuf_setlen(m, dlen);
2542 }
2543
2544 /* skip next len bytes */
2545 for (; m && (cnt < len); m = mbuf_next(m)) {
2546 dlen = mbuf_len(m);
2547 cnt += dlen;
2548 if (cnt > len) {
2549 /* truncate to end of data */
2550 mbuf_setlen(m, dlen - (cnt - len));
2551 if (m == nmc->nmc_mcur) {
2552 nmc->nmc_left -= (cnt - len);
2553 }
2554 cnt = len;
2555 }
2556 }
2557 if (mlen) {
2558 *mlen = cnt;
2559 }
2560
2561 /* trim any trailing data */
2562 if (m == nmc->nmc_mcur) {
2563 nmc->nmc_left = 0;
2564 }
2565 for (; m; m = mbuf_next(m)) {
2566 mbuf_setlen(m, 0);
2567 }
2568
2569 return 0;
2570}
2571
2572int
2573nfsm_chain_add_fattr(
2574 struct nfsrv_descript *nd,
2575 struct nfsm_chain *nmc,
2576 struct vnode_attr *vap)
2577{
2578 int error = 0;
2579
2580 // XXX Should we assert here that all fields are supported?
2581
2582 nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
2583 if (nd->nd_vers == NFS_VER3) {
2584 nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
2585 } else {
2586 nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
2587 }
2588 nfsm_chain_add_32(error, nmc, vap->va_nlink);
2589 nfsm_chain_add_32(error, nmc, vap->va_uid);
2590 nfsm_chain_add_32(error, nmc, vap->va_gid);
2591 if (nd->nd_vers == NFS_VER3) {
2592 nfsm_chain_add_64(error, nmc, vap->va_data_size);
2593 nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
2594 nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
2595 nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
2596 nfsm_chain_add_64(error, nmc, vap->va_fsid);
2597 nfsm_chain_add_64(error, nmc, vap->va_fileid);
2598 } else {
2599 nfsm_chain_add_32(error, nmc, vap->va_data_size);
2600 nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
2601 if (vap->va_type == VFIFO) {
2602 nfsm_chain_add_32(error, nmc, 0xffffffff);
2603 } else {
2604 nfsm_chain_add_32(error, nmc, vap->va_rdev);
2605 }
2606 nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
2607 nfsm_chain_add_32(error, nmc, vap->va_fsid);
2608 nfsm_chain_add_32(error, nmc, vap->va_fileid);
2609 }
2610 nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
2611 nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
2612 nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);
2613
2614 return error;
2615}
2616
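/*
 * Decode the client-supplied settable attributes (mode, uid, gid, size,
 * atime, mtime) into a vnode_attr, handling both the NFSv2 sattr and
 * NFSv3 sattr3 encodings (including the set-to-client/set-to-server
 * time variants).
 */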
2617int
2618nfsm_chain_get_sattr(
2619 struct nfsrv_descript *nd,
2620 struct nfsm_chain *nmc,
2621 struct vnode_attr *vap)
2622{
2623 int error = 0;
2624 uint32_t val = 0;
2625 uint64_t val64 = 0;
2626 struct timespec now;
2627
2628 if (nd->nd_vers == NFS_VER2) {
2629 /*
2630 * There is/was a bug in the Sun client that puts 0xffff in the mode
2631 * field of sattr when it should put in 0xffffffff. The u_short
2632 * doesn't sign extend. So check the low order 2 bytes for 0xffff.
2633 */
2634 nfsm_chain_get_32(error, nmc, val);
2635 if ((val & 0xffff) != 0xffff) {
2636 VATTR_SET(vap, va_mode, val & 07777);
2637 /* save the "type" bits for NFSv2 create */
2638 VATTR_SET(vap, va_type, IFTOVT(val));
2639 VATTR_CLEAR_ACTIVE(vap, va_type);
2640 }
2641 nfsm_chain_get_32(error, nmc, val);
2642 if (val != (uint32_t)-1) {
2643 VATTR_SET(vap, va_uid, val);
2644 }
2645 nfsm_chain_get_32(error, nmc, val);
2646 if (val != (uint32_t)-1) {
2647 VATTR_SET(vap, va_gid, val);
2648 }
2649 /* save the "size" bits for NFSv2 create (even if they appear unset) */
2650 nfsm_chain_get_32(error, nmc, val);
2651 VATTR_SET(vap, va_data_size, val);
2652 if (val == (uint32_t)-1) {
2653 VATTR_CLEAR_ACTIVE(vap, va_data_size);
2654 }
2655 nfsm_chain_get_time(error, nmc, NFS_VER2,
2656 vap->va_access_time.tv_sec,
2657 vap->va_access_time.tv_nsec);
2658 if (vap->va_access_time.tv_sec != -1) {
2659 VATTR_SET_ACTIVE(vap, va_access_time);
2660 }
2661 nfsm_chain_get_time(error, nmc, NFS_VER2,
2662 vap->va_modify_time.tv_sec,
2663 vap->va_modify_time.tv_nsec);
2664 if (vap->va_modify_time.tv_sec != -1) {
2665 VATTR_SET_ACTIVE(vap, va_modify_time);
2666 }
2667 return error;
2668 }
2669
2670 /* NFSv3 */
2671 nfsm_chain_get_32(error, nmc, val);
2672 if (val) {
2673 nfsm_chain_get_32(error, nmc, val);
2674 VATTR_SET(vap, va_mode, val & 07777);
2675 }
2676 nfsm_chain_get_32(error, nmc, val);
2677 if (val) {
2678 nfsm_chain_get_32(error, nmc, val);
2679 VATTR_SET(vap, va_uid, val);
2680 }
2681 nfsm_chain_get_32(error, nmc, val);
2682 if (val) {
2683 nfsm_chain_get_32(error, nmc, val);
2684 VATTR_SET(vap, va_gid, val);
2685 }
2686 nfsm_chain_get_32(error, nmc, val);
2687 if (val) {
2688 nfsm_chain_get_64(error, nmc, val64);
2689 VATTR_SET(vap, va_data_size, val64);
2690 }
2691 nanotime(&now);
2692 nfsm_chain_get_32(error, nmc, val);
2693 switch (val) {
2694 case NFS_TIME_SET_TO_CLIENT:
2695 nfsm_chain_get_time(error, nmc, nd->nd_vers,
2696 vap->va_access_time.tv_sec,
2697 vap->va_access_time.tv_nsec);
2698 VATTR_SET_ACTIVE(vap, va_access_time);
2699 vap->va_vaflags &= ~VA_UTIMES_NULL;
2700 break;
2701 case NFS_TIME_SET_TO_SERVER:
2702 VATTR_SET(vap, va_access_time, now);
2703 vap->va_vaflags |= VA_UTIMES_NULL;
2704 break;
2705 }
2706 nfsm_chain_get_32(error, nmc, val);
2707 switch (val) {
2708 case NFS_TIME_SET_TO_CLIENT:
2709 nfsm_chain_get_time(error, nmc, nd->nd_vers,
2710 vap->va_modify_time.tv_sec,
2711 vap->va_modify_time.tv_nsec);
2712 VATTR_SET_ACTIVE(vap, va_modify_time);
2713 vap->va_vaflags &= ~VA_UTIMES_NULL;
2714 break;
2715 case NFS_TIME_SET_TO_SERVER:
2716 VATTR_SET(vap, va_modify_time, now);
2717 if (!VATTR_IS_ACTIVE(vap, va_access_time)) {
2718 vap->va_vaflags |= VA_UTIMES_NULL;
2719 }
2720 break;
2721 }
2722
2723 return error;
2724}
2725
2726/*
2727 * Compare two security flavor structs
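 * Returns 0 if the two flavor lists are identical (same count, same order),
 * 1 otherwise.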
2728 */
2729int
2730nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
2731{
2732 int i;
2733
2734 if (sf1->count != sf2->count) {
2735 return 1;
2736 }
2737 for (i = 0; i < sf1->count; i++) {
2738 if (sf1->flavors[i] != sf2->flavors[i]) {
2739 return 1;
2740 }
2741 }
2742 return 0;
2743}
2744
2745/*
2746 * Build hash lists of net addresses and hang them off the NFS export.
2747 * Called by nfsrv_export() to set up the lists of export addresses.
2748 */
2749int
2750nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
2751{
2752 struct nfs_export_net_args nxna;
2753 struct nfs_netopt *no, *rn_no;
2754 struct radix_node_head *rnh;
2755 struct radix_node *rn;
2756 struct sockaddr *saddr, *smask;
2757 struct domain *dom;
2758 size_t i, ss_minsize;
2759 int error;
2760 unsigned int net;
2761 user_addr_t uaddr;
2762 kauth_cred_t cred;
2763
2764 uaddr = unxa->nxa_nets;
2765 ss_minsize = sizeof(((struct sockaddr_storage *)0)->ss_len) + sizeof(((struct sockaddr_storage *)0)->ss_family);
2766 for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
2767 error = copyin(uaddr, &nxna, sizeof(nxna));
2768 if (error) {
2769 return error;
2770 }
2771
2772 if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) ||
2773 (nxna.nxna_addr.ss_len != 0 && nxna.nxna_addr.ss_len < ss_minsize) ||
2774 nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) ||
2775 (nxna.nxna_mask.ss_len != 0 && nxna.nxna_mask.ss_len < ss_minsize) ||
2776 nxna.nxna_addr.ss_family > AF_MAX ||
2777 nxna.nxna_mask.ss_family > AF_MAX) {
2778 return EINVAL;
2779 }
2780
2781 if (nxna.nxna_flags & (NX_MAPROOT | NX_MAPALL)) {
2782 struct posix_cred temp_pcred;
2783 bzero(&temp_pcred, sizeof(temp_pcred));
2784 temp_pcred.cr_uid = nxna.nxna_cred.cr_uid;
2785 temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
2786 for (i = 0; i < (size_t)nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) {
2787 temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
2788 }
2789 cred = posix_cred_create(&temp_pcred);
2790 if (!IS_VALID_CRED(cred)) {
2791 return ENOMEM;
2792 }
2793 } else {
2794 cred = NOCRED;
2795 }
2796
2797 if (nxna.nxna_addr.ss_len == 0) {
2798 /* No address means this is a default/world export */
2799 if (nx->nx_flags & NX_DEFAULTEXPORT) {
2800 if (IS_VALID_CRED(cred)) {
2801 kauth_cred_unref(&cred);
2802 }
2803 return EEXIST;
2804 }
2805 nx->nx_flags |= NX_DEFAULTEXPORT;
2806 nx->nx_defopt.nxo_flags = nxna.nxna_flags;
2807 nx->nx_defopt.nxo_cred = cred;
2808 bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));
2809 nx->nx_expcnt++;
2810 continue;
2811 }
2812
2813 i = sizeof(struct nfs_netopt);
2814 i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
2815 MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
2816 if (!no) {
2817 if (IS_VALID_CRED(cred)) {
2818 kauth_cred_unref(&cred);
2819 }
2820 return ENOMEM;
2821 }
2822 bzero(no, sizeof(struct nfs_netopt));
2823 no->no_opt.nxo_flags = nxna.nxna_flags;
2824 no->no_opt.nxo_cred = cred;
2825 bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));
2826
2827 saddr = (struct sockaddr *)(no + 1);
2828 bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
2829 if (nxna.nxna_mask.ss_len) {
2830 smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
2831 bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);
2832 } else {
2833 smask = NULL;
2834 }
2835 sa_family_t family = saddr->sa_family;
2836 if ((rnh = nx->nx_rtable[family]) == 0) {
2837 /*
2838 * It seems silly to initialize every AF when most are not
2839 * used, so do it on demand here.
2840 */
2841 TAILQ_FOREACH(dom, &domains, dom_entry) {
2842 if (dom->dom_family == family && dom->dom_rtattach) {
2843 dom->dom_rtattach((void **)&nx->nx_rtable[family],
2844 dom->dom_rtoffset);
2845 break;
2846 }
2847 }
2848 if ((rnh = nx->nx_rtable[family]) == 0) {
2849 if (IS_VALID_CRED(cred)) {
2850 kauth_cred_unref(&cred);
2851 }
2852 _FREE(no, M_NETADDR);
2853 return ENOBUFS;
2854 }
2855 }
2856 rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
2857 if (rn == 0) {
2858 /*
2859 * One of the reasons that rnh_addaddr may fail is that
2860 * the entry already exists. To check for this case, we
2861 * look up the entry to see if it is there. If so, we
2862 * do not need to make a new entry; we just continue with the next address.
2863 *
2864 * XXX should this be rnh_lookup() instead?
2865 */
2866 int matched = 0;
2867 rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
2868 rn_no = (struct nfs_netopt *)rn;
2869 if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
2870 (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
2871 (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
2872 kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
2873 if (cred == cred2) {
2874 /* creds are same (or both NULL) */
2875 matched = 1;
2876 } else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) {
2877 /*
2878 * Now compare the effective and
2879 * supplementary groups...
2880 *
2881 * Note: This comparison, as written,
2882 * does not correctly indicate that
2883 * the groups are equivalent, since
2884 * other than the first supplementary
2885 * group, which is also the effective
2886 * group, order on the remaining groups
2887 * doesn't matter, and this is an
2888 * ordered compare.
2889 */
2890 gid_t groups[NGROUPS];
2891 gid_t groups2[NGROUPS];
2892 size_t groupcount = NGROUPS;
2893 size_t group2count = NGROUPS;
2894
2895 if (!kauth_cred_getgroups(cred, groups, &groupcount) &&
2896 !kauth_cred_getgroups(cred2, groups2, &group2count) &&
2897 groupcount == group2count) {
2898 for (i = 0; i < group2count; i++) {
2899 if (groups[i] != groups2[i]) {
2900 break;
2901 }
2902 }
2903 if (i >= group2count || i >= NGROUPS) {
2904 matched = 1;
2905 }
2906 }
2907 }
2908 }
2909 if (IS_VALID_CRED(cred)) {
2910 kauth_cred_unref(&cred);
2911 }
2912 _FREE(no, M_NETADDR);
2913 if (matched) {
2914 continue;
2915 }
2916 return EPERM;
2917 }
2918 nx->nx_expcnt++;
2919 }
2920
2921 return 0;
2922}
2923
2924/*
2925 * To properly track an export's netopt count, we pass an additional
2926 * argument to nfsrv_free_netopt() so that it can decrement
2927 * the export's count.
2928 */
2929struct nfsrv_free_netopt_arg {
2930 uint32_t *cnt;
2931 struct radix_node_head *rnh;
2932};
2933
2934int
2935nfsrv_free_netopt(struct radix_node *rn, void *w)
2936{
2937 struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
2938 struct radix_node_head *rnh = fna->rnh;
2939 uint32_t *cnt = fna->cnt;
2940 struct nfs_netopt *nno = (struct nfs_netopt *)rn;
2941
2942 (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
2943 if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
2944 kauth_cred_unref(&nno->no_opt.nxo_cred);
2945 }
2946 _FREE((caddr_t)rn, M_NETADDR);
2947 *cnt -= 1;
2948 return 0;
2949}
2950
2951/*
2952 * Free the net address hash lists that are hanging off the mount points.
2953 */
2954int
2955nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
2956{
2957 struct nfs_export_net_args nxna;
2958 struct radix_node_head *rnh;
2959 struct radix_node *rn;
2960 struct nfsrv_free_netopt_arg fna;
2961 struct nfs_netopt *nno;
2962 size_t ss_minsize;
2963 user_addr_t uaddr;
2964 unsigned int net;
2965 int i, error;
2966
2967 if (!unxa || !unxa->nxa_netcount) {
2968 /* delete everything */
2969 for (i = 0; i <= AF_MAX; i++) {
2970 if ((rnh = nx->nx_rtable[i])) {
2971 fna.rnh = rnh;
2972 fna.cnt = &nx->nx_expcnt;
2973 (*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
2974 _FREE((caddr_t)rnh, M_RTABLE);
2975 nx->nx_rtable[i] = 0;
2976 }
2977 }
2978 return 0;
2979 }
2980
2981 /* delete only the exports specified */
2982 uaddr = unxa->nxa_nets;
2983 ss_minsize = sizeof(((struct sockaddr_storage *)0)->ss_len) + sizeof(((struct sockaddr_storage *)0)->ss_family);
2984 for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
2985 error = copyin(uaddr, &nxna, sizeof(nxna));
2986 if (error) {
2987 return error;
2988 }
2989
2990 if (nxna.nxna_addr.ss_len == 0) {
2991 /* No address means this is a default/world export */
2992 if (nx->nx_flags & NX_DEFAULTEXPORT) {
2993 nx->nx_flags &= ~NX_DEFAULTEXPORT;
2994 if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
2995 kauth_cred_unref(&nx->nx_defopt.nxo_cred);
2996 }
2997 nx->nx_expcnt--;
2998 }
2999 continue;
3000 }
3001
3002 if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) ||
3003 (nxna.nxna_addr.ss_len != 0 && nxna.nxna_addr.ss_len < ss_minsize) ||
3004 nxna.nxna_addr.ss_family > AF_MAX) {
3005 printf("nfsrv_free_addrlist: invalid socket address (%u)\n", net);
3006 continue;
3007 }
3008
3009 if (nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) ||
3010 (nxna.nxna_mask.ss_len != 0 && nxna.nxna_mask.ss_len < ss_minsize) ||
3011 nxna.nxna_mask.ss_family > AF_MAX) {
3012 printf("nfsrv_free_addrlist: invalid socket mask (%u)\n", net);
3013 continue;
3014 }
3015
3016 if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
3017 /* AF not initialized? */
3018 if (!(unxa->nxa_flags & NXA_ADD)) {
3019 printf("nfsrv_free_addrlist: address not found (0)\n");
3020 }
3021 continue;
3022 }
3023
3024 rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
3025 nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
3026 if (!rn || (rn->rn_flags & RNF_ROOT)) {
3027 if (!(unxa->nxa_flags & NXA_ADD)) {
3028 printf("nfsrv_free_addrlist: address not found (1)\n");
3029 }
3030 continue;
3031 }
3032
3033 (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
3034 nno = (struct nfs_netopt *)rn;
3035 if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
3036 kauth_cred_unref(&nno->no_opt.nxo_cred);
3037 }
3038 _FREE((caddr_t)rn, M_NETADDR);
3039
3040 nx->nx_expcnt--;
3041 if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
3042 /* no more entries in rnh, so free it up */
3043 _FREE((caddr_t)rnh, M_RTABLE);
3044 nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
3045 }
3046 }
3047
3048 return 0;
3049}
3050
3051void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX
3052
3053#define DATA_VOLUME_MP "/System/Volumes/Data" // PLATFORM_DATA_VOLUME_MOUNT_POINT
3054
3055int
3056nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx)
3057{
3058 int error = 0;
3059 size_t pathlen, nxfs_pathlen;
3060 struct nfs_exportfs *nxfs, *nxfs2, *nxfs3;
3061 struct nfs_export *nx, *nx2, *nx3;
3062 struct nfs_filehandle nfh;
3063 struct nameidata mnd, xnd;
3064 vnode_t mvp = NULL, xvp = NULL;
3065 mount_t mp = NULL;
3066 char path[MAXPATHLEN], *nxfs_path;
3067 char fl_pathbuff[MAXPATHLEN];
3068 int fl_pathbuff_len = MAXPATHLEN;
3069 int expisroot;
3070 size_t datavol_len = strlen(DATA_VOLUME_MP);
3071
3072 if (unxa->nxa_flags == NXA_CHECK) {
3073 /* just check if the path is an NFS-exportable file system */
3074 error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
3075 if (error) {
3076 return error;
3077 }
3078 NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
3079 UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
3080 error = namei(&mnd);
3081 if (error) {
3082 return error;
3083 }
3084 mvp = mnd.ni_vp;
3085 mp = vnode_mount(mvp);
3086 /* make sure it's the root of a file system */
3087 if (!vnode_isvroot(mvp)) {
3088 error = EINVAL;
3089 }
3090 /* make sure the file system is NFS-exportable */
3091 if (!error) {
3092 nfh.nfh_len = NFSV3_MAX_FID_SIZE;
3093 error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
3094 }
3095 if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
3096 error = EIO;
3097 }
3098 if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
3099 error = EISDIR;
3100 }
3101 vnode_put(mvp);
3102 nameidone(&mnd);
3103 return error;
3104 }
3105
3106 /* all other operations: must be super user */
3107 if ((error = vfs_context_suser(ctx))) {
3108 return error;
3109 }
3110
3111 if (unxa->nxa_flags & NXA_DELETE_ALL) {
3112 /* delete all exports on all file systems */
3113 lck_rw_lock_exclusive(&nfsrv_export_rwlock);
3114 while ((nxfs = LIST_FIRST(&nfsrv_exports))) {
3115 mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
3116 if (mp) {
3117 vfs_clearflags(mp, MNT_EXPORTED);
3118 mount_iterdrop(mp);
3119 mp = NULL;
3120 }
3121 /* delete all exports on this file system */
3122 while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
3123 LIST_REMOVE(nx, nx_next);
3124 LIST_REMOVE(nx, nx_hash);
3125 /* delete all netopts for this export */
3126 nfsrv_free_addrlist(nx, NULL);
3127 nx->nx_flags &= ~NX_DEFAULTEXPORT;
3128 if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
3129 kauth_cred_unref(&nx->nx_defopt.nxo_cred);
3130 }
3131 /* free active user list for this export */
3132 nfsrv_free_user_list(&nx->nx_user_list);
3133 FREE(nx->nx_path, M_TEMP);
3134 FREE(nx, M_TEMP);
3135 }
3136 LIST_REMOVE(nxfs, nxfs_next);
3137 FREE(nxfs->nxfs_path, M_TEMP);
3138 FREE(nxfs, M_TEMP);
3139 }
3140 if (nfsrv_export_hashtbl) {
3141 /* all exports deleted, clean up export hash table */
3142 FREE(nfsrv_export_hashtbl, M_TEMP);
3143 nfsrv_export_hashtbl = NULL;
3144 }
3145 lck_rw_done(&nfsrv_export_rwlock);
3146 return 0;
3147 }
3148
3149 error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
3150 if (error) {
3151 return error;
3152 }
3153
3154 lck_rw_lock_exclusive(&nfsrv_export_rwlock);
3155
3156 /* init export hash table if not already done */
3157 if (!nfsrv_export_hashtbl) {
3158 if (nfsrv_export_hash_size <= 0) {
3159 nfsrv_export_hash_size = NFSRVEXPHASHSZ;
3160 }
3161 nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash);
3162 }
3163
3164 // first check if we've already got an exportfs with the given ID
3165 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
3166 if (nxfs->nxfs_id == unxa->nxa_fsid) {
3167 break;
3168 }
3169 }
3170 if (nxfs) {
3171 /* verify exported FS path matches given path */
3172 if (strncmp(path, nxfs->nxfs_path, MAXPATHLEN) &&
3173 (strncmp(path, DATA_VOLUME_MP, datavol_len) || strncmp(path + datavol_len, nxfs->nxfs_path, MAXPATHLEN - datavol_len))) {
3174 error = EEXIST;
3175 goto unlock_out;
3176 }
3177 if ((unxa->nxa_flags & (NXA_ADD | NXA_OFFLINE)) == NXA_ADD) {
3178 /* find exported FS root vnode */
3179 NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
3180 UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx);
3181 error = namei(&mnd);
3182 if (error) {
3183 goto unlock_out;
3184 }
3185 mvp = mnd.ni_vp;
3186 /* make sure it's (still) the root of a file system */
3187 if (!vnode_isvroot(mvp)) {
3188 error = EINVAL;
3189 goto out;
3190 }
3191 /* if adding, verify that the mount is still what we expect */
3192 mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
3193 if (!mp) {
3194 /* check for firmlink-free path */
3195 if (vn_getpath_no_firmlink(mvp, fl_pathbuff, &fl_pathbuff_len) == 0 &&
3196 fl_pathbuff_len > 0 &&
3197 !strncmp(nxfs->nxfs_path, fl_pathbuff, MAXPATHLEN)) {
3198 mp = vfs_getvfs_by_mntonname(vnode_mount(mvp)->mnt_vfsstat.f_mntonname);
3199 }
3200 }
3201 if (mp) {
3202 mount_ref(mp, 0);
3203 mount_iterdrop(mp);
3204 }
3205 /* sanity check: this should be the same mount */
3206 if (mp != vnode_mount(mvp)) {
3207 error = EINVAL;
3208 goto out;
3209 }
3210 }
3211 } else {
3212 /* no current exported file system with that ID */
3213 if (!(unxa->nxa_flags & NXA_ADD)) {
3214 error = ENOENT;
3215 goto unlock_out;
3216 }
3217
3218 /* find exported FS root vnode */
3219 NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
3220 UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
3221 error = namei(&mnd);
3222 if (error) {
3223 if (!(unxa->nxa_flags & NXA_OFFLINE)) {
3224 goto unlock_out;
3225 }
3226 } else {
3227 mvp = mnd.ni_vp;
3228 /* make sure it's the root of a file system */
3229 if (!vnode_isvroot(mvp)) {
3230 /* bail if not marked offline */
3231 if (!(unxa->nxa_flags & NXA_OFFLINE)) {
3232 error = EINVAL;
3233 goto out;
3234 }
3235 vnode_put(mvp);
3236 nameidone(&mnd);
3237 mvp = NULL;
3238 } else {
3239 mp = vnode_mount(mvp);
3240 mount_ref(mp, 0);
3241
3242 /* make sure the file system is NFS-exportable */
3243 nfh.nfh_len = NFSV3_MAX_FID_SIZE;
3244 error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
3245 if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
3246 error = EIO;
3247 }
3248 if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
3249 error = EISDIR;
3250 }
3251 if (error) {
3252 goto out;
3253 }
3254 }
3255 }
3256
3257 /* add an exportfs for it */
3258 MALLOC(nxfs, struct nfs_exportfs *, sizeof(struct nfs_exportfs), M_TEMP, M_WAITOK);
3259 if (!nxfs) {
3260 error = ENOMEM;
3261 goto out;
3262 }
3263 bzero(nxfs, sizeof(struct nfs_exportfs));
3264 nxfs->nxfs_id = unxa->nxa_fsid;
3265 if (mp) {
3266 nxfs_path = mp->mnt_vfsstat.f_mntonname;
3267 nxfs_pathlen = sizeof(mp->mnt_vfsstat.f_mntonname);
3268 } else {
3269 nxfs_path = path;
3270 nxfs_pathlen = pathlen;
3271 }
3272 MALLOC(nxfs->nxfs_path, char*, nxfs_pathlen, M_TEMP, M_WAITOK);
3273 if (!nxfs->nxfs_path) {
3274 FREE(nxfs, M_TEMP);
3275 error = ENOMEM;
3276 goto out;
3277 }
3278 bcopy(nxfs_path, nxfs->nxfs_path, nxfs_pathlen);
3279 /* insert into list in reverse-sorted order */
3280 nxfs3 = NULL;
3281 LIST_FOREACH(nxfs2, &nfsrv_exports, nxfs_next) {
3282 if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) {
3283 break;
3284 }
3285 nxfs3 = nxfs2;
3286 }
3287 if (nxfs2) {
3288 LIST_INSERT_BEFORE(nxfs2, nxfs, nxfs_next);
3289 } else if (nxfs3) {
3290 LIST_INSERT_AFTER(nxfs3, nxfs, nxfs_next);
3291 } else {
3292 LIST_INSERT_HEAD(&nfsrv_exports, nxfs, nxfs_next);
3293 }
3294
3295 /* make sure any quotas are enabled before we export the file system */
3296 if (mp) {
3297 enablequotas(mp, ctx);
3298 }
3299 }
3300
3301 if (unxa->nxa_exppath) {
3302 error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, &pathlen);
3303 if (error) {
3304 goto out;
3305 }
3306 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
3307 if (nx->nx_id == unxa->nxa_expid) {
3308 break;
3309 }
3310 }
3311 if (nx) {
3312 /* verify exported FS path matches given path */
3313 if (strncmp(path, nx->nx_path, MAXPATHLEN)) {
3314 error = EEXIST;
3315 goto out;
3316 }
3317 } else {
3318 /* no current export with that ID */
3319 if (!(unxa->nxa_flags & NXA_ADD)) {
3320 error = ENOENT;
3321 goto out;
3322 }
3323 /* add an export for it */
3324 MALLOC(nx, struct nfs_export *, sizeof(struct nfs_export), M_TEMP, M_WAITOK);
3325 if (!nx) {
3326 error = ENOMEM;
3327 goto out1;
3328 }
3329 bzero(nx, sizeof(struct nfs_export));
3330 nx->nx_id = unxa->nxa_expid;
3331 nx->nx_fs = nxfs;
3332 microtime(&nx->nx_exptime);
3333 MALLOC(nx->nx_path, char*, pathlen, M_TEMP, M_WAITOK);
3334 if (!nx->nx_path) {
3335 error = ENOMEM;
3336 FREE(nx, M_TEMP);
3337 nx = NULL;
3338 goto out1;
3339 }
3340 bcopy(path, nx->nx_path, pathlen);
3341 /* initialize the active user list */
3342 nfsrv_init_user_list(&nx->nx_user_list);
3343 /* insert into list in reverse-sorted order */
3344 nx3 = NULL;
3345 LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
3346 if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) {
3347 break;
3348 }
3349 nx3 = nx2;
3350 }
3351 if (nx2) {
3352 LIST_INSERT_BEFORE(nx2, nx, nx_next);
3353 } else if (nx3) {
3354 LIST_INSERT_AFTER(nx3, nx, nx_next);
3355 } else {
3356 LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next);
3357 }
3358 /* insert into hash */
3359 LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash);
3360
3361 /*
3362 * We don't allow/support nested exports. Check if the new entry
3363 * nests with the entries before and after or if there's an
3364 * entry for the file system root and subdirs.
3365 */
3366 error = 0;
3367 if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) &&
3368 (nx3->nx_path[pathlen - 1] == '/')) ||
3369 (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) &&
3370 (nx->nx_path[strlen(nx2->nx_path)] == '/'))) {
3371 error = EINVAL;
3372 }
3373 if (!error) {
3374 /* check export conflict with fs root export and vice versa */
3375 expisroot = !nx->nx_path[0] ||
3376 ((nx->nx_path[0] == '.') && !nx->nx_path[1]);
3377 LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
3378 if (expisroot) {
3379 if (nx2 != nx) {
3380 break;
3381 }
3382 } else if (!nx2->nx_path[0]) {
3383 break;
3384 } else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) {
3385 break;
3386 }
3387 }
3388 if (nx2) {
3389 error = EINVAL;
3390 }
3391 }
3392 if (error) {
3393 /*
3394 * Don't actually return an error because mountd is
3395 * probably about to delete the conflicting export.
3396 * This can happen when a new export momentarily conflicts
3397 * with an old export while the transition is being made.
3398 * Theoretically, mountd could be written to avoid this
3399 * transient situation - but it would greatly increase the
3400 * complexity of mountd for very little overall benefit.
3401 */
3402 printf("nfsrv_export: warning: nested exports: %s/%s\n",
3403 nxfs->nxfs_path, nx->nx_path);
3404 error = 0;
3405 }
3406 nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
3407 }
3408 /* make sure file handle is set up */
3409 if ((nx->nx_fh.nfh_xh.nxh_version != htonl(NFS_FH_VERSION)) ||
3410 (nx->nx_fh.nfh_xh.nxh_flags & NXHF_INVALIDFH)) {
3411 /* try to set up export root file handle */
3412 nx->nx_fh.nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
3413 nx->nx_fh.nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
3414 nx->nx_fh.nfh_xh.nxh_expid = htonl(nx->nx_id);
3415 nx->nx_fh.nfh_xh.nxh_flags = 0;
3416 nx->nx_fh.nfh_xh.nxh_reserved = 0;
3417 nx->nx_fh.nfh_fhp = (u_char*)&nx->nx_fh.nfh_xh;
3418 bzero(&nx->nx_fh.nfh_fid[0], NFSV2_MAX_FID_SIZE);
3419 if (mvp) {
3420 /* find export root vnode */
3421 if (!nx->nx_path[0] || ((nx->nx_path[0] == '.') && !nx->nx_path[1])) {
3422 /* exporting file system's root directory */
3423 xvp = mvp;
3424 vnode_get(xvp);
3425 } else {
3426 xnd.ni_cnd.cn_nameiop = LOOKUP;
3427#if CONFIG_TRIGGERS
3428 xnd.ni_op = OP_LOOKUP;
3429#endif
3430 xnd.ni_cnd.cn_flags = LOCKLEAF;
3431 xnd.ni_pathlen = (uint32_t)pathlen - 1; // pathlen max value is equal to MAXPATHLEN
3432 xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path;
3433 xnd.ni_startdir = mvp;
3434 xnd.ni_usedvp = mvp;
3435 xnd.ni_rootdir = rootvnode;
3436 xnd.ni_cnd.cn_context = ctx;
3437 while ((error = lookup(&xnd)) == ERECYCLE) {
3438 xnd.ni_cnd.cn_flags = LOCKLEAF;
3439 xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf;
3440 xnd.ni_usedvp = xnd.ni_dvp = xnd.ni_startdir = mvp;
3441 }
3442 if (error) {
3443 goto out1;
3444 }
3445 xvp = xnd.ni_vp;
3446 }
3447
3448 if (vnode_vtype(xvp) != VDIR) {
3449 error = EINVAL;
3450 vnode_put(xvp);
3451 goto out1;
3452 }
3453
3454 /* grab file handle */
3455 nx->nx_fh.nfh_len = NFSV3_MAX_FID_SIZE;
3456 error = VFS_VPTOFH(xvp, (int*)&nx->nx_fh.nfh_len, &nx->nx_fh.nfh_fid[0], NULL);
3457 if (!error && (nx->nx_fh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
3458 error = EIO;
3459 } else {
3460 nx->nx_fh.nfh_xh.nxh_fidlen = nx->nx_fh.nfh_len;
3461 nx->nx_fh.nfh_len += sizeof(nx->nx_fh.nfh_xh);
3462 }
3463
3464 vnode_put(xvp);
3465 if (error) {
3466 goto out1;
3467 }
3468 } else {
3469 nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
3470 nx->nx_fh.nfh_xh.nxh_fidlen = 0;
3471 nx->nx_fh.nfh_len = sizeof(nx->nx_fh.nfh_xh);
3472 }
3473 }
3474 } else {
3475 nx = NULL;
3476 }
3477
3478 /* perform the export changes */
3479 if (unxa->nxa_flags & NXA_DELETE) {
3480 if (!nx) {
3481 /* delete all exports on this file system */
3482 while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
3483 LIST_REMOVE(nx, nx_next);
3484 LIST_REMOVE(nx, nx_hash);
3485 /* delete all netopts for this export */
3486 nfsrv_free_addrlist(nx, NULL);
3487 nx->nx_flags &= ~NX_DEFAULTEXPORT;
3488 if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
3489 kauth_cred_unref(&nx->nx_defopt.nxo_cred);
3490 }
3491 /* delete active user list for this export */
3492 nfsrv_free_user_list(&nx->nx_user_list);
3493 FREE(nx->nx_path, M_TEMP);
3494 FREE(nx, M_TEMP);
3495 }
3496 goto out1;
3497 } else if (!unxa->nxa_netcount) {
3498 /* delete all netopts for this export */
3499 nfsrv_free_addrlist(nx, NULL);
3500 nx->nx_flags &= ~NX_DEFAULTEXPORT;
3501 if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
3502 kauth_cred_unref(&nx->nx_defopt.nxo_cred);
3503 }
3504 } else {
3505 /* delete only the netopts for the given addresses */
3506 error = nfsrv_free_addrlist(nx, unxa);
3507 if (error) {
3508 goto out1;
3509 }
3510 }
3511 }
3512 if (unxa->nxa_flags & NXA_ADD) {
3513 /*
3514 * If going offline, set the export time so that when
3515 * coming back online we will present a new write verifier
3516 * to the client.
3517 */
3518 if (unxa->nxa_flags & NXA_OFFLINE) {
3519 microtime(&nx->nx_exptime);
3520 }
3521
3522 error = nfsrv_hang_addrlist(nx, unxa);
3523 if (!error && mp) {
3524 vfs_setflags(mp, MNT_EXPORTED);
3525 }
3526 }
3527
3528out1:
3529 if (nx && !nx->nx_expcnt) {
3530 /* export has no export options */
3531 LIST_REMOVE(nx, nx_next);
3532 LIST_REMOVE(nx, nx_hash);
3533 /* delete active user list for this export */
3534 nfsrv_free_user_list(&nx->nx_user_list);
3535 FREE(nx->nx_path, M_TEMP);
3536 FREE(nx, M_TEMP);
3537 }
3538 if (LIST_EMPTY(&nxfs->nxfs_exports)) {
3539 /* exported file system has no more exports */
3540 LIST_REMOVE(nxfs, nxfs_next);
3541 FREE(nxfs->nxfs_path, M_TEMP);
3542 FREE(nxfs, M_TEMP);
3543 if (mp) {
3544 vfs_clearflags(mp, MNT_EXPORTED);
3545 }
3546 }
3547
3548out:
3549 if (mvp) {
3550 vnode_put(mvp);
3551 nameidone(&mnd);
3552 }
3553unlock_out:
3554 if (mp) {
3555 mount_drop(mp, 0);
3556 }
3557 lck_rw_done(&nfsrv_export_rwlock);
3558 return error;
3559}
3560
3561/*
3562 * Check if there is at least one export that will allow this address.
3563 *
3564 * Return 0 if there is an export that will allow this address;
3565 * otherwise return EACCES.
3566 */
3567int
3568nfsrv_check_exports_allow_address(mbuf_t nam)
3569{
3570 struct nfs_exportfs *nxfs;
3571 struct nfs_export *nx;
3572 struct nfs_export_options *nxo = NULL;
3573
3574 if (nam == NULL) {
3575 return EACCES;
3576 }
3577
3578 lck_rw_lock_shared(&nfsrv_export_rwlock);
3579 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
3580 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
3581 /* A little optimization: check for the default first */
3582 if (nx->nx_flags & NX_DEFAULTEXPORT) {
3583 nxo = &nx->nx_defopt;
3584 }
3585 if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) {
3586 goto found;
3587 }
3588 }
3589 }
3590found:
3591 lck_rw_done(&nfsrv_export_rwlock);
3592
3593 return nxo ? 0 : EACCES;
3594}
3595
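/*
 * Find the export options applicable to the given client address:
 * check the per-address-family radix tree first, then fall back to the
 * default/world export options if the export has them.
 */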
3596struct nfs_export_options *
3597nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam)
3598{
3599 struct nfs_export_options *nxo = NULL;
3600 struct nfs_netopt *no = NULL;
3601 struct radix_node_head *rnh;
3602 struct sockaddr *saddr;
3603
3604 /* Lookup in the export list first. */
3605 if (nam != NULL) {
3606 saddr = mbuf_data(nam);
3607 if (saddr->sa_family > AF_MAX) {
3608 /* Bogus sockaddr? Don't match anything. */
3609 return NULL;
3610 }
3611 rnh = nx->nx_rtable[saddr->sa_family];
3612 if (rnh != NULL) {
3613 no = (struct nfs_netopt *)
3614 (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
3615 if (no && no->no_rnodes->rn_flags & RNF_ROOT) {
3616 no = NULL;
3617 }
3618 if (no) {
3619 nxo = &no->no_opt;
3620 }
3621 }
3622 }
3623 /* If no address match, use the default if it exists. */
3624 if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) {
3625 nxo = &nx->nx_defopt;
3626 }
3627 return nxo;
3628}
3629
3630/* find an export for the given handle */
3631struct nfs_export *
3632nfsrv_fhtoexport(struct nfs_filehandle *nfhp)
3633{
3634 struct nfs_exphandle *nxh = (struct nfs_exphandle*)nfhp->nfh_fhp;
3635 struct nfs_export *nx;
3636 uint32_t fsid, expid;
3637
3638 if (!nfsrv_export_hashtbl) {
3639 return NULL;
3640 }
3641 fsid = ntohl(nxh->nxh_fsid);
3642 expid = ntohl(nxh->nxh_expid);
3643 nx = NFSRVEXPHASH(fsid, expid)->lh_first;
3644 for (; nx; nx = LIST_NEXT(nx, nx_hash)) {
3645 if (nx->nx_fs->nxfs_id != fsid) {
3646 continue;
3647 }
3648 if (nx->nx_id != expid) {
3649 continue;
3650 }
3651 break;
3652 }
3653 return nx;
3654}
3655
3656struct nfsrv_getvfs_by_mntonname_callback_args {
3657 const char *path; /* IN */
3658 mount_t mp; /* OUT */
3659};
3660
3661static int
3662nfsrv_getvfs_by_mntonname_callback(mount_t mp, void *v)
3663{
3664 struct nfsrv_getvfs_by_mntonname_callback_args * const args = v;
3665 char real_mntonname[MAXPATHLEN];
3666 int pathbuflen = MAXPATHLEN;
3667 vnode_t rvp;
3668 int error;
3669
3670 error = VFS_ROOT(mp, &rvp, vfs_context_current());
3671 if (error) {
3672 goto out;
3673 }
3674 error = vn_getpath_ext(rvp, NULLVP, real_mntonname, &pathbuflen,
3675 VN_GETPATH_FSENTER | VN_GETPATH_NO_FIRMLINK);
3676 vnode_put(rvp);
3677 if (error) {
3678 goto out;
3679 }
3680 if (strcmp(args->path, real_mntonname) == 0) {
3681 error = vfs_busy(mp, LK_NOWAIT);
3682 if (error == 0) {
3683 args->mp = mp;
3684 }
3685 return VFS_RETURNED_DONE;
3686 }
3687out:
3688 return VFS_RETURNED;
3689}
3690
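/*
 * Look up a mount by its mount-on name, falling back to an iteration that
 * compares firmlink-free root paths. On success the mount is returned
 * vfs_busy'd; the caller is responsible for vfs_unbusy().
 */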
3691static mount_t
3692nfsrv_getvfs_by_mntonname(char *path)
3693{
3694 struct nfsrv_getvfs_by_mntonname_callback_args args = {
3695 .path = path,
3696 .mp = NULL,
3697 };
3698 mount_t mp;
3699 int error;
3700
3701 mp = vfs_getvfs_by_mntonname(path);
3702 if (mp) {
3703 error = vfs_busy(mp, LK_NOWAIT);
3704 mount_iterdrop(mp);
3705 if (error) {
3706 mp = NULL;
3707 }
3708 } else if (vfs_iterate(0, nfsrv_getvfs_by_mntonname_callback,
3709 &args) == 0) {
3710 mp = args.mp;
3711 }
3712 return mp;
3713}
3714
3715/*
3716 * nfsrv_fhtovp() - convert FH to vnode and export info
3717 */
3718int
3719nfsrv_fhtovp(
3720 struct nfs_filehandle *nfhp,
3721 struct nfsrv_descript *nd,
3722 vnode_t *vpp,
3723 struct nfs_export **nxp,
3724 struct nfs_export_options **nxop)
3725{
3726 struct nfs_exphandle *nxh = (struct nfs_exphandle*)nfhp->nfh_fhp;
3727 struct nfs_export_options *nxo;
3728 u_char *fidp;
3729 int error;
3730 struct mount *mp;
3731 mbuf_t nam = NULL;
3732 uint32_t v;
3733 int i, valid;
3734
3735 *vpp = NULL;
3736 *nxp = NULL;
3737 *nxop = NULL;
3738
3739 if (nd != NULL) {
3740 nam = nd->nd_nam;
3741 }
3742
3743 v = ntohl(nxh->nxh_version);
3744 if (v != NFS_FH_VERSION) {
3745 /* file handle format not supported */
3746 return ESTALE;
3747 }
3748 if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) {
3749 return EBADRPC;
3750 }
3751 if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) {
3752 return ESTALE;
3753 }
3754 v = ntohs(nxh->nxh_flags);
3755 if (v & NXHF_INVALIDFH) {
3756 return ESTALE;
3757 }
3758
3759 *nxp = nfsrv_fhtoexport(nfhp);
3760 if (!*nxp) {
3761 return ESTALE;
3762 }
3763
3764 /* Get the export option structure for this <export, client> tuple. */
3765 *nxop = nxo = nfsrv_export_lookup(*nxp, nam);
3766 if (nam && (*nxop == NULL)) {
3767 return EACCES;
3768 }
3769
3770 if (nd != NULL) {
3771 /* Validate the security flavor of the request */
3772 for (i = 0, valid = 0; i < nxo->nxo_sec.count; i++) {
3773 if (nd->nd_sec == nxo->nxo_sec.flavors[i]) {
3774 valid = 1;
3775 break;
3776 }
3777 }
3778 if (!valid) {
3779 /*
3780 * RFC 2623 section 2.3.2 recommends no authentication
3781 * requirement for certain NFS procedures used for mounting.
3782 * This allows an unauthenticated superuser on the client
3783 * to do mounts for the benefit of authenticated users.
3784 */
3785 if (nd->nd_vers == NFS_VER2) {
3786 if (nd->nd_procnum == NFSV2PROC_GETATTR ||
3787 nd->nd_procnum == NFSV2PROC_STATFS) {
3788 valid = 1;
3789 }
3790 }
3791 if (nd->nd_vers == NFS_VER3) {
3792 if (nd->nd_procnum == NFSPROC_FSINFO) {
3793 valid = 1;
3794 }
3795 }
3796
3797 if (!valid) {
3798 return NFSERR_AUTHERR | AUTH_REJECTCRED;
3799 }
3800 }
3801 }
3802
3803 if (nxo && (nxo->nxo_flags & NX_OFFLINE)) {
3804 return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
3805 }
3806
3807 /* find mount structure */
3808 mp = nfsrv_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path);
3809 if (!mp) {
3810 /*
3811 * We have an export, but no mount?
3812 * Perhaps the export just hasn't been marked offline yet.
3813 */
3814 return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
3815 }
3816
3817 fidp = nfhp->nfh_fhp + sizeof(*nxh);
3818 error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL);
3819 vfs_unbusy(mp);
3820 if (error) {
3821 return error;
3822 }
3823 /* vnode pointer should be good at this point or ... */
3824 if (*vpp == NULL) {
3825 return ESTALE;
3826 }
3827 return 0;
3828}
3829
3830/*
3831 * nfsrv_credcheck() - check/map credentials according
3832 * to given export options.
3833 */
3834int
3835nfsrv_credcheck(
3836 struct nfsrv_descript *nd,
3837 vfs_context_t ctx,
3838 __unused struct nfs_export *nx,
3839 struct nfs_export_options *nxo)
3840{
3841 if (nxo && nxo->nxo_cred) {
3842 if ((nxo->nxo_flags & NX_MAPALL) ||
3843 ((nxo->nxo_flags & NX_MAPROOT) && !suser(nd->nd_cr, NULL))) {
3844 kauth_cred_ref(nxo->nxo_cred);
3845 kauth_cred_unref(&nd->nd_cr);
3846 nd->nd_cr = nxo->nxo_cred;
3847 }
3848 }
3849 ctx->vc_ucred = nd->nd_cr;
3850 return 0;
3851}
3852
3853/*
3854 * nfsrv_vptofh() - convert vnode to file handle for given export
3855 *
3856 * If the caller is passing in a vnode for a ".." directory entry,
3857 * they can pass a directory NFS file handle (dnfhp) which will be
3858 * checked against the root export file handle. If it matches, we
3859 * refuse to provide the file handle for the out-of-export directory.
3860 */
3861int
3862nfsrv_vptofh(
3863 struct nfs_export *nx,
3864 int nfsvers,
3865 struct nfs_filehandle *dnfhp,
3866 vnode_t vp,
3867 vfs_context_t ctx,
3868 struct nfs_filehandle *nfhp)
3869{
3870 int error;
3871 uint32_t maxfidsize;
3872
3873 nfhp->nfh_fhp = (u_char*)&nfhp->nfh_xh;
3874 nfhp->nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
3875 nfhp->nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
3876 nfhp->nfh_xh.nxh_expid = htonl(nx->nx_id);
3877 nfhp->nfh_xh.nxh_flags = 0;
3878 nfhp->nfh_xh.nxh_reserved = 0;
3879
3880 if (nfsvers == NFS_VER2) {
3881 bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE);
3882 }
3883
3884 /* if directory FH matches export root, return invalid FH */
3885 if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) {
3886 if (nfsvers == NFS_VER2) {
3887 nfhp->nfh_len = NFSX_V2FH;
3888 } else {
3889 nfhp->nfh_len = sizeof(nfhp->nfh_xh);
3890 }
3891 nfhp->nfh_xh.nxh_fidlen = 0;
3892 nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH);
3893 return 0;
3894 }
3895
3896 if (nfsvers == NFS_VER2) {
3897 maxfidsize = NFSV2_MAX_FID_SIZE;
3898 } else {
3899 maxfidsize = NFSV3_MAX_FID_SIZE;
3900 }
3901 nfhp->nfh_len = maxfidsize;
3902
3903 error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx);
3904 if (error) {
3905 return error;
3906 }
3907 if (nfhp->nfh_len > maxfidsize) {
3908 return EOVERFLOW;
3909 }
3910 nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len;
3911 nfhp->nfh_len += sizeof(nfhp->nfh_xh);
3912 if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) {
3913 nfhp->nfh_len = NFSX_V2FH;
3914 }
3915
3916 return 0;
3917}
3918
3919/*
3920 * Compare two file handles to see if they're the same.
3921 * Note that we don't use nfh_len because that may include
3922 * padding in an NFSv2 file handle.
3923 */
3924int
3925nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2)
3926{
3927 struct nfs_exphandle *nxh1, *nxh2;
3928 int len1, len2;
3929
3930 nxh1 = (struct nfs_exphandle *)fh1->nfh_fhp;
3931 nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp;
3932 len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen;
3933 len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen;
3934 if (len1 != len2) {
3935 return 0;
3936 }
3937 if (bcmp(nxh1, nxh2, len1)) {
3938 return 0;
3939 }
3940 return 1;
3941}
3942
3943/*
3944 * Functions for dealing with active user lists
3945 */
3946
3947/*
3948 * Search the hash table for a user node with a matching IP address and uid field.
3949 * If found, the node's tm_last timestamp is updated and the node is returned.
3950 *
3951 * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned.
3952 * Returns NULL if a new node could not be allocated OR saddr length exceeds sizeof(unode->sock).
3953 *
3954 * The list's user_mutex lock MUST be held.
3955 */
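/*
 * Implementation note: each export's active user list is a hash table keyed
 * by uid plus an LRU tailq.  New nodes are allocated until the list reaches
 * nfsrv_user_stat_max_nodes; beyond that the oldest LRU entry is recycled
 * for the new user.
 */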
3956struct nfs_user_stat_node *
3957nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *saddr, uid_t uid)
3958{
3959 struct nfs_user_stat_node *unode;
3960 struct timeval now;
3961 struct nfs_user_stat_hashtbl_head *head;
3962
3963 /* search the hash table */
3964 head = NFS_USER_STAT_HASH(list->user_hashtbl, uid);
3965 LIST_FOREACH(unode, head, hash_link) {
3966 if ((uid == unode->uid) && (nfs_sockaddr_cmp(saddr, (struct sockaddr*)&unode->sock) == 0)) {
3967 /* found matching node */
3968 break;
3969 }
3970 }
3971
3972 if (unode) {
3973 /* found node in the hash table, now update lru position */
3974 TAILQ_REMOVE(&list->user_lru, unode, lru_link);
3975 TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
3976
3977 /* update time stamp */
3978 microtime(&now);
3979 unode->tm_last = (uint32_t)now.tv_sec;
3980 return unode;
3981 }
3982
3983 if (saddr->sa_len > sizeof(((struct nfs_user_stat_node *)0)->sock)) {
3984 /* saddr length exceeds maximum value */
3985 return NULL;
3986 }
3987
3988 if (list->node_count < nfsrv_user_stat_max_nodes) {
3989 /* Allocate a new node */
3990 MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node),
3991 M_TEMP, M_WAITOK | M_ZERO);
3992
3993 if (!unode) {
3994 return NULL;
3995 }
3996
3997 /* increment node count */
3998 OSAddAtomic(1, &nfsrv_user_stat_node_count);
3999 list->node_count++;
4000 } else {
4001 /* reuse the oldest node in the lru list */
4002 unode = TAILQ_FIRST(&list->user_lru);
4003
4004 if (!unode) {
4005 return NULL;
4006 }
4007
4008 /* Remove the node */
4009 TAILQ_REMOVE(&list->user_lru, unode, lru_link);
4010 LIST_REMOVE(unode, hash_link);
4011 }
4012
4013 /* Initialize the node */
4014 unode->uid = uid;
4015 bcopy(saddr, &unode->sock, MIN(saddr->sa_len, sizeof(unode->sock)));
4016 microtime(&now);
4017 unode->ops = 0;
4018 unode->bytes_read = 0;
4019 unode->bytes_written = 0;
4020 unode->tm_start = (uint32_t)now.tv_sec;
4021 unode->tm_last = (uint32_t)now.tv_sec;
4022
4023 /* insert the node */
4024 TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
4025 LIST_INSERT_HEAD(head, unode, hash_link);
4026
4027 return unode;
4028}
4029
4030void
4031nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes)
4032{
4033 struct nfs_user_stat_node *unode;
4034 struct nfs_active_user_list *ulist;
4035 struct sockaddr *saddr;
4036
4037 if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) {
4038 return;
4039 }
4040
4041 saddr = (struct sockaddr *)mbuf_data(nd->nd_nam);
4042
4043 /* check address family before going any further */
4044 if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) {
4045 return;
4046 }
4047
4048 ulist = &nx->nx_user_list;
4049
4050 /* lock the active user list */
4051 lck_mtx_lock(&ulist->user_mutex);
4052
4053 /* get the user node */
4054 unode = nfsrv_get_user_stat_node(ulist, saddr, uid);
4055
4056 if (!unode) {
4057 lck_mtx_unlock(&ulist->user_mutex);
4058 return;
4059 }
4060
4061 /* update counters */
4062 unode->ops += ops;
4063 unode->bytes_read += rd_bytes;
4064 unode->bytes_written += wr_bytes;
4065
4066 /* done */
4067 lck_mtx_unlock(&ulist->user_mutex);
4068}
4069
4070/* initialize an active user list */
4071void
4072nfsrv_init_user_list(struct nfs_active_user_list *ulist)
4073{
4074 uint i;
4075
4076 /* initialize the lru */
4077 TAILQ_INIT(&ulist->user_lru);
4078
4079 /* initialize the hash table */
4080 for (i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) {
4081 LIST_INIT(&ulist->user_hashtbl[i]);
4082 }
4083 ulist->node_count = 0;
4084
4085 lck_mtx_init(&ulist->user_mutex, &nfsrv_active_user_mutex_group, LCK_ATTR_NULL);
4086}
4087
4088/* Free all nodes in an active user list */
4089void
4090nfsrv_free_user_list(struct nfs_active_user_list *ulist)
4091{
4092 struct nfs_user_stat_node *unode;
4093
4094 if (!ulist) {
4095 return;
4096 }
4097
4098 while ((unode = TAILQ_FIRST(&ulist->user_lru))) {
4099 /* Remove node and free */
4100 TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
4101 LIST_REMOVE(unode, hash_link);
4102 FREE(unode, M_TEMP);
4103
4104 /* decrement node count */
4105 OSAddAtomic(-1, &nfsrv_user_stat_node_count);
4106 }
4107 ulist->node_count = 0;
4108
4109 lck_mtx_destroy(&ulist->user_mutex, &nfsrv_active_user_mutex_group);
4110}
4111
4112/* Reclaim old expired user nodes from active user lists. */
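/*
 * Nodes idle for more than nfsrv_user_stat_max_idle_sec are unlinked from
 * each export's list (with that list's mutex held and the export rwlock held
 * shared), collected on a private list, and freed only after all the locks
 * have been dropped.
 */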
4113void
4114nfsrv_active_user_list_reclaim(void)
4115{
4116 struct nfs_exportfs *nxfs;
4117 struct nfs_export *nx;
4118 struct nfs_active_user_list *ulist;
4119 struct nfs_user_stat_hashtbl_head oldlist;
4120 struct nfs_user_stat_node *unode, *unode_next;
4121 struct timeval now;
4122 long tstale;
4123
4124 LIST_INIT(&oldlist);
4125
4126 lck_rw_lock_shared(&nfsrv_export_rwlock);
4127 microtime(&now);
4128 tstale = now.tv_sec - nfsrv_user_stat_max_idle_sec;
4129 LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
4130 LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
4131 /* Scan through all user nodes of this export */
4132 ulist = &nx->nx_user_list;
4133 lck_mtx_lock(&ulist->user_mutex);
4134 for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
4135 unode_next = TAILQ_NEXT(unode, lru_link);
4136
4137 /* stop at the first node that has not yet expired; the LRU is ordered oldest first */
4138 if (unode->tm_last >= tstale) {
4139 break;
4140 }
4141
4142 /* Remove node from the active user list */
4143 TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
4144 LIST_REMOVE(unode, hash_link);
4145
4146 /* Add node to temp list */
4147 LIST_INSERT_HEAD(&oldlist, unode, hash_link);
4148
4149 /* decrement node count */
4150 OSAddAtomic(-1, &nfsrv_user_stat_node_count);
4151 ulist->node_count--;
4152 }
4153 /* can unlock this export's list now */
4154 lck_mtx_unlock(&ulist->user_mutex);
4155 }
4156 }
4157 lck_rw_done(&nfsrv_export_rwlock);
4158
4159 /* Free expired nodes */
4160 while ((unode = LIST_FIRST(&oldlist))) {
4161 LIST_REMOVE(unode, hash_link);
4162 FREE(unode, M_TEMP);
4163 }
4164}
4165
4166/*
4167 * Maps errno values to nfs error numbers.
4168 * Use NFSERR_IO as the catch all for ones not specifically defined in
4169 * RFC 1094.
4170 */
4171static u_char nfsrv_v2errmap[] = {
4172 NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4173 NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4174 NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
4175 NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
4176 NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4177 NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
4178 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4179 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4180 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4181 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4182 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4183 NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
4184 NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
4185 NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
4186};
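/*
 * The table is indexed by errno minus one (see nfsrv_errmap() below); since
 * the entries are u_char, sizeof() doubles as the element count.  Assuming
 * the usual BSD errno values, EPERM (1) maps to NFSERR_PERM, EACCES (13) to
 * NFSERR_ACCES, and ESTALE (70) to NFSERR_STALE; other in-range values fall
 * back to NFSERR_IO.
 */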
4187
4188/*
4189 * Maps errno values to nfs error numbers.
4190 * Although it is not obvious whether or not NFS clients really care if
4191 * a returned error value is in the specified list for the procedure, the
4192 * safest thing to do is filter them appropriately. For Version 2, the
4193 * X/Open XNFS document is the only specification that defines error values
4194 * for each RPC (The RFC simply lists all possible error values for all RPCs),
4195 * so I have decided to not do this for Version 2.
4196 * The first entry is the default error return and the rest are the valid
4197 * errors for that RPC in increasing numeric order.
4198 */
4199static short nfsv3err_null[] = {
4200 0,
4201 0,
4202};
4203
4204static short nfsv3err_getattr[] = {
4205 NFSERR_IO,
4206 NFSERR_IO,
4207 NFSERR_STALE,
4208 NFSERR_BADHANDLE,
4209 NFSERR_SERVERFAULT,
4210 NFSERR_TRYLATER,
4211 0,
4212};
4213
4214static short nfsv3err_setattr[] = {
4215 NFSERR_IO,
4216 NFSERR_PERM,
4217 NFSERR_IO,
4218 NFSERR_ACCES,
4219 NFSERR_INVAL,
4220 NFSERR_NOSPC,
4221 NFSERR_ROFS,
4222 NFSERR_DQUOT,
4223 NFSERR_STALE,
4224 NFSERR_BADHANDLE,
4225 NFSERR_NOT_SYNC,
4226 NFSERR_SERVERFAULT,
4227 NFSERR_TRYLATER,
4228 0,
4229};
4230
4231static short nfsv3err_lookup[] = {
4232 NFSERR_IO,
4233 NFSERR_NOENT,
4234 NFSERR_IO,
4235 NFSERR_ACCES,
4236 NFSERR_NOTDIR,
4237 NFSERR_NAMETOL,
4238 NFSERR_STALE,
4239 NFSERR_BADHANDLE,
4240 NFSERR_SERVERFAULT,
4241 NFSERR_TRYLATER,
4242 0,
4243};
4244
4245static short nfsv3err_access[] = {
4246 NFSERR_IO,
4247 NFSERR_IO,
4248 NFSERR_STALE,
4249 NFSERR_BADHANDLE,
4250 NFSERR_SERVERFAULT,
4251 NFSERR_TRYLATER,
4252 0,
4253};
4254
4255static short nfsv3err_readlink[] = {
4256 NFSERR_IO,
4257 NFSERR_IO,
4258 NFSERR_ACCES,
4259 NFSERR_INVAL,
4260 NFSERR_STALE,
4261 NFSERR_BADHANDLE,
4262 NFSERR_NOTSUPP,
4263 NFSERR_SERVERFAULT,
4264 NFSERR_TRYLATER,
4265 0,
4266};
4267
4268static short nfsv3err_read[] = {
4269 NFSERR_IO,
4270 NFSERR_IO,
4271 NFSERR_NXIO,
4272 NFSERR_ACCES,
4273 NFSERR_INVAL,
4274 NFSERR_STALE,
4275 NFSERR_BADHANDLE,
4276 NFSERR_SERVERFAULT,
4277 NFSERR_TRYLATER,
4278 0,
4279};
4280
4281static short nfsv3err_write[] = {
4282 NFSERR_IO,
4283 NFSERR_IO,
4284 NFSERR_ACCES,
4285 NFSERR_INVAL,
4286 NFSERR_FBIG,
4287 NFSERR_NOSPC,
4288 NFSERR_ROFS,
4289 NFSERR_DQUOT,
4290 NFSERR_STALE,
4291 NFSERR_BADHANDLE,
4292 NFSERR_SERVERFAULT,
4293 NFSERR_TRYLATER,
4294 0,
4295};
4296
4297static short nfsv3err_create[] = {
4298 NFSERR_IO,
4299 NFSERR_IO,
4300 NFSERR_ACCES,
4301 NFSERR_EXIST,
4302 NFSERR_NOTDIR,
4303 NFSERR_NOSPC,
4304 NFSERR_ROFS,
4305 NFSERR_NAMETOL,
4306 NFSERR_DQUOT,
4307 NFSERR_STALE,
4308 NFSERR_BADHANDLE,
4309 NFSERR_NOTSUPP,
4310 NFSERR_SERVERFAULT,
4311 NFSERR_TRYLATER,
4312 0,
4313};
4314
4315static short nfsv3err_mkdir[] = {
4316 NFSERR_IO,
4317 NFSERR_IO,
4318 NFSERR_ACCES,
4319 NFSERR_EXIST,
4320 NFSERR_NOTDIR,
4321 NFSERR_NOSPC,
4322 NFSERR_ROFS,
4323 NFSERR_NAMETOL,
4324 NFSERR_DQUOT,
4325 NFSERR_STALE,
4326 NFSERR_BADHANDLE,
4327 NFSERR_NOTSUPP,
4328 NFSERR_SERVERFAULT,
4329 NFSERR_TRYLATER,
4330 0,
4331};
4332
4333static short nfsv3err_symlink[] = {
4334 NFSERR_IO,
4335 NFSERR_IO,
4336 NFSERR_ACCES,
4337 NFSERR_EXIST,
4338 NFSERR_NOTDIR,
4339 NFSERR_NOSPC,
4340 NFSERR_ROFS,
4341 NFSERR_NAMETOL,
4342 NFSERR_DQUOT,
4343 NFSERR_STALE,
4344 NFSERR_BADHANDLE,
4345 NFSERR_NOTSUPP,
4346 NFSERR_SERVERFAULT,
4347 NFSERR_TRYLATER,
4348 0,
4349};
4350
4351static short nfsv3err_mknod[] = {
4352 NFSERR_IO,
4353 NFSERR_IO,
4354 NFSERR_ACCES,
4355 NFSERR_EXIST,
4356 NFSERR_NOTDIR,
4357 NFSERR_NOSPC,
4358 NFSERR_ROFS,
4359 NFSERR_NAMETOL,
4360 NFSERR_DQUOT,
4361 NFSERR_STALE,
4362 NFSERR_BADHANDLE,
4363 NFSERR_NOTSUPP,
4364 NFSERR_SERVERFAULT,
4365 NFSERR_BADTYPE,
4366 NFSERR_TRYLATER,
4367 0,
4368};
4369
4370static short nfsv3err_remove[] = {
4371 NFSERR_IO,
4372 NFSERR_NOENT,
4373 NFSERR_IO,
4374 NFSERR_ACCES,
4375 NFSERR_NOTDIR,
4376 NFSERR_ROFS,
4377 NFSERR_NAMETOL,
4378 NFSERR_STALE,
4379 NFSERR_BADHANDLE,
4380 NFSERR_SERVERFAULT,
4381 NFSERR_TRYLATER,
4382 0,
4383};
4384
4385static short nfsv3err_rmdir[] = {
4386 NFSERR_IO,
4387 NFSERR_NOENT,
4388 NFSERR_IO,
4389 NFSERR_ACCES,
4390 NFSERR_EXIST,
4391 NFSERR_NOTDIR,
4392 NFSERR_INVAL,
4393 NFSERR_ROFS,
4394 NFSERR_NAMETOL,
4395 NFSERR_NOTEMPTY,
4396 NFSERR_STALE,
4397 NFSERR_BADHANDLE,
4398 NFSERR_NOTSUPP,
4399 NFSERR_SERVERFAULT,
4400 NFSERR_TRYLATER,
4401 0,
4402};
4403
4404static short nfsv3err_rename[] = {
4405 NFSERR_IO,
4406 NFSERR_NOENT,
4407 NFSERR_IO,
4408 NFSERR_ACCES,
4409 NFSERR_EXIST,
4410 NFSERR_XDEV,
4411 NFSERR_NOTDIR,
4412 NFSERR_ISDIR,
4413 NFSERR_INVAL,
4414 NFSERR_NOSPC,
4415 NFSERR_ROFS,
4416 NFSERR_MLINK,
4417 NFSERR_NAMETOL,
4418 NFSERR_NOTEMPTY,
4419 NFSERR_DQUOT,
4420 NFSERR_STALE,
4421 NFSERR_BADHANDLE,
4422 NFSERR_NOTSUPP,
4423 NFSERR_SERVERFAULT,
4424 NFSERR_TRYLATER,
4425 0,
4426};
4427
4428static short nfsv3err_link[] = {
4429 NFSERR_IO,
4430 NFSERR_IO,
4431 NFSERR_ACCES,
4432 NFSERR_EXIST,
4433 NFSERR_XDEV,
4434 NFSERR_NOTDIR,
4435 NFSERR_INVAL,
4436 NFSERR_NOSPC,
4437 NFSERR_ROFS,
4438 NFSERR_MLINK,
4439 NFSERR_NAMETOL,
4440 NFSERR_DQUOT,
4441 NFSERR_STALE,
4442 NFSERR_BADHANDLE,
4443 NFSERR_NOTSUPP,
4444 NFSERR_SERVERFAULT,
4445 NFSERR_TRYLATER,
4446 0,
4447};
4448
4449static short nfsv3err_readdir[] = {
4450 NFSERR_IO,
4451 NFSERR_IO,
4452 NFSERR_ACCES,
4453 NFSERR_NOTDIR,
4454 NFSERR_STALE,
4455 NFSERR_BADHANDLE,
4456 NFSERR_BAD_COOKIE,
4457 NFSERR_TOOSMALL,
4458 NFSERR_SERVERFAULT,
4459 NFSERR_TRYLATER,
4460 0,
4461};
4462
4463static short nfsv3err_readdirplus[] = {
4464 NFSERR_IO,
4465 NFSERR_IO,
4466 NFSERR_ACCES,
4467 NFSERR_NOTDIR,
4468 NFSERR_STALE,
4469 NFSERR_BADHANDLE,
4470 NFSERR_BAD_COOKIE,
4471 NFSERR_NOTSUPP,
4472 NFSERR_TOOSMALL,
4473 NFSERR_SERVERFAULT,
4474 NFSERR_TRYLATER,
4475 0,
4476};
4477
4478static short nfsv3err_fsstat[] = {
4479 NFSERR_IO,
4480 NFSERR_IO,
4481 NFSERR_STALE,
4482 NFSERR_BADHANDLE,
4483 NFSERR_SERVERFAULT,
4484 NFSERR_TRYLATER,
4485 0,
4486};
4487
4488static short nfsv3err_fsinfo[] = {
4489 NFSERR_STALE,
4490 NFSERR_STALE,
4491 NFSERR_BADHANDLE,
4492 NFSERR_SERVERFAULT,
4493 NFSERR_TRYLATER,
4494 0,
4495};
4496
4497static short nfsv3err_pathconf[] = {
4498 NFSERR_STALE,
4499 NFSERR_STALE,
4500 NFSERR_BADHANDLE,
4501 NFSERR_SERVERFAULT,
4502 NFSERR_TRYLATER,
4503 0,
4504};
4505
4506static short nfsv3err_commit[] = {
4507 NFSERR_IO,
4508 NFSERR_IO,
4509 NFSERR_STALE,
4510 NFSERR_BADHANDLE,
4511 NFSERR_SERVERFAULT,
4512 NFSERR_TRYLATER,
4513 0,
4514};
4515
4516static short *nfsrv_v3errmap[] = {
4517 nfsv3err_null,
4518 nfsv3err_getattr,
4519 nfsv3err_setattr,
4520 nfsv3err_lookup,
4521 nfsv3err_access,
4522 nfsv3err_readlink,
4523 nfsv3err_read,
4524 nfsv3err_write,
4525 nfsv3err_create,
4526 nfsv3err_mkdir,
4527 nfsv3err_symlink,
4528 nfsv3err_mknod,
4529 nfsv3err_remove,
4530 nfsv3err_rmdir,
4531 nfsv3err_rename,
4532 nfsv3err_link,
4533 nfsv3err_readdir,
4534 nfsv3err_readdirplus,
4535 nfsv3err_fsstat,
4536 nfsv3err_fsinfo,
4537 nfsv3err_pathconf,
4538 nfsv3err_commit,
4539};
4540
4541/*
4542 * Map errnos to NFS error numbers. For Version 3 also filter out error
4543 * numbers not specified for the associated procedure.
4544 */
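/*
 * Walk-through of the NFSv3 path below: nfsrv_v3errmap[] is indexed by
 * nd_procnum (valid up through NFSPROC_COMMIT) and each per-procedure list
 * is scanned in ascending order.  If the errno appears in the list it is
 * returned unchanged; once an entry greater than the errno is found the scan
 * stops and the list's first (default) entry is returned instead.
 * For example (hypothetical request):
 *   nd->nd_vers = NFS_VER3; nd->nd_procnum = NFSPROC_GETATTR;
 *   nfsrv_errmap(nd, EACCES) returns NFSERR_IO, since EACCES is not listed
 *   in nfsv3err_getattr[].
 */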
4545int
4546nfsrv_errmap(struct nfsrv_descript *nd, int err)
4547{
4548 short *defaulterrp, *errp;
4549
4550 if (nd->nd_vers == NFS_VER2) {
4551 if (err <= (int)sizeof(nfsrv_v2errmap)) {
4552 return (int)nfsrv_v2errmap[err - 1];
4553 }
4554 return NFSERR_IO;
4555 }
4556 /* NFSv3 */
4557 if (nd->nd_procnum > NFSPROC_COMMIT) {
4558 return err & 0xffff;
4559 }
4560 errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
4561 while (*++errp) {
4562 if (*errp == err) {
4563 return err;
4564 } else if (*errp > err) {
4565 break;
4566 }
4567 }
4568 return (int)*defaulterrp;
4569}
4570
4571#endif /* CONFIG_NFS_SERVER */
4572
4573#endif /* CONFIG_NFS */