/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $
 */
/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/syscall.h>
#include <sys/ubc_internal.h>
#include <sys/fcntl.h>
#include <sys/domain.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <sys/vmparam.h>
#include <kern/clock.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#define _NFS_XDR_SUBS_FUNCS_ /* define this to get xdrbuf function definitions */
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <miscfs/specfs/specdev.h>
#include <netinet/in.h>
#include <net/kpi_interface.h>
#include <sys/utfconv.h>
struct nfsstats __attribute__((aligned(8))) nfsstats;
size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0;
/*
 * functions to convert between NFS and VFS types
 */
vtonfs_type(enum vtype vtype, int nfsvers)
        if (nfsvers > NFS_VER2) {
        if (nfsvers > NFS_VER2) {

nfstov_type(nfstype nvtype, int nfsvers)
        if (nfsvers > NFS_VER2) {
        if (nfsvers > NFS_VER2) {
        if (nfsvers > NFS_VER3) {
        if (nfsvers > NFS_VER3) {

vtonfsv2_mode(enum vtype vtype, mode_t m)
        return vnode_makeimode(vtype, m);
        return vnode_makeimode(VCHR, m);
        return vnode_makeimode(VNON, m);
/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
int nfsv3_procid[NFS_NPROCS] = {
#endif /* NFSSERVER */

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {

/*
 * initialize NFS's cache of mbuf constants
 */
    nfs_mbuf_mhlen = ms.mhlen;
    nfs_mbuf_minclsize = ms.minclsize;
/*
 * allocate a list of mbufs to hold the given amount of data
 */
nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt)
    mbuf_t mhead, mlast, m;

    mhead = mlast = NULL;

        nfsm_mbuf_get(error, &m, (size - len));
        if (mlast && ((error = mbuf_setnext(mlast, m)))) {
        mlen = mbuf_maxlen(m);
        if ((len + mlen) > size) {
        mbuf_setlen(m, mlen);
#endif /* NFSSERVER */
/*
 * nfsm_chain_new_mbuf()
 *
 * Add a new mbuf to the given chain.
 */
nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint)
    if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) {
        sizehint = nfs_mbuf_minclsize;

    /* allocate a new mbuf */
    nfsm_mbuf_get(error, &mb, sizehint);
        panic("got NULL mbuf?");

    /* do we have a current mbuf? */
        /* first cap off current mbuf */
        mbuf_setlen(nmc->nmc_mcur, nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur));
        /* then append the new mbuf */
        error = mbuf_setnext(nmc->nmc_mcur, mb);

    /* set up for using the new mbuf */
    nmc->nmc_ptr = mbuf_data(mb);
    nmc->nmc_left = mbuf_trailingspace(mb);
/*
 * nfsm_chain_add_opaque_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
    uint32_t paddedlen, tlen;

    paddedlen = nfsm_rndup(len);
        if (!nmc->nmc_left) {
            error = nfsm_chain_new_mbuf(nmc, paddedlen);
        tlen = MIN(nmc->nmc_left, paddedlen);
            bcopy(buf, nmc->nmc_ptr, tlen);
            bzero(nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
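#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * The opaque encoders above rely on XDR's rule that opaque data is padded
 * with zero bytes to a 4-byte boundary.  This is a minimal user-space sketch
 * of that rounding; xdr_rndup() is a hypothetical stand-in for the kernel's
 * nfsm_rndup() macro.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t
xdr_rndup(uint32_t len)
{
    /* round up to the next multiple of 4 */
    return (len + 3) & ~3u;
}

static void
xdr_padding_example(void)
{
    assert(xdr_rndup(0) == 0);
    assert(xdr_rndup(1) == 4);   /* 3 zero pad bytes follow the data */
    assert(xdr_rndup(4) == 4);   /* already aligned, no padding */
    assert(xdr_rndup(5) == 8);
}
#endif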
/*
 * nfsm_chain_add_opaque_nopad_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
        if (nmc->nmc_left <= 0) {
            error = nfsm_chain_new_mbuf(nmc, len);
        tlen = MIN(nmc->nmc_left, len);
        bcopy(buf, nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
/*
 * nfsm_chain_add_uio()
 *
 * Add "len" bytes of data from "uio" to the given chain.
 */
nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len)
    uint32_t paddedlen, tlen;

    paddedlen = nfsm_rndup(len);
        if (!nmc->nmc_left) {
            error = nfsm_chain_new_mbuf(nmc, paddedlen);
        tlen = MIN(nmc->nmc_left, paddedlen);
            uiomove(nmc->nmc_ptr, tlen, uio);
            bzero(nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
/*
 * Find the length of the NFS mbuf chain
 * up to the current encoding/decoding offset.
 */
nfsm_chain_offset(struct nfsm_chain *nmc)
    for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) {
        if (mb == nmc->nmc_mcur) {
            return len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb));
/*
 * nfsm_chain_advance()
 *
 * Advance an nfsm_chain by "len" bytes.
 */
nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len)
        if (nmc->nmc_left >= len) {
            nmc->nmc_left -= len;
        len -= nmc->nmc_left;
        nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur);
        nmc->nmc_ptr = mbuf_data(mb);
        nmc->nmc_left = mbuf_len(mb);
/*
 * nfsm_chain_reverse()
 *
 * Reverse decode offset in an nfsm_chain by "len" bytes.
 */
nfsm_chain_reverse(struct nfsm_chain *nmc, uint32_t len)
    uint32_t mlen, new_offset;

    mlen = nmc->nmc_ptr - (caddr_t) mbuf_data(nmc->nmc_mcur);
        nmc->nmc_left += len;

    new_offset = nfsm_chain_offset(nmc) - len;
    nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead);

    return nfsm_chain_advance(nmc, new_offset);
/*
 * nfsm_chain_get_opaque_pointer_f()
 *
 * Return a pointer to the next "len" bytes of contiguous data in
 * the mbuf chain. If the next "len" bytes are not contiguous, we
 * try to manipulate the mbuf chain so that it is.
 *
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr)
    uint32_t left, need, mblen, cplen, padlen;

    /* move to next mbuf with data */
    while (nmc->nmc_mcur && (nmc->nmc_left == 0)) {
        mb = mbuf_next(nmc->nmc_mcur);
        nmc->nmc_ptr = mbuf_data(mb);
        nmc->nmc_left = mbuf_len(mb);

    /* check if we've run out of data */
    if (!nmc->nmc_mcur) {

    /* do we already have a contiguous buffer? */
    if (nmc->nmc_left >= len) {
        /* the returned pointer will be the current pointer */
        *pptr = (u_char *)nmc->nmc_ptr;
        error = nfsm_chain_advance(nmc, nfsm_rndup(len));

    padlen = nfsm_rndup(len) - len;

    /* we need (len - left) more bytes */
    mbcur = nmc->nmc_mcur;
    left = nmc->nmc_left;

    if (need > mbuf_trailingspace(mbcur)) {
        /*
         * The needed bytes won't fit in the current mbuf so we'll
         * allocate a new mbuf to hold the contiguous range of data.
         */
        nfsm_mbuf_get(error, &mb, len);

        /* double check that this mbuf can hold all the data */
        if (mbuf_maxlen(mb) < len) {

        /* the returned pointer will be the new mbuf's data pointer */
        *pptr = ptr = mbuf_data(mb);

        /* copy "left" bytes to the new mbuf */
        bcopy(nmc->nmc_ptr, ptr, left);
        mbuf_setlen(mb, left);

        /* insert the new mbuf between the current and next mbufs */
        error = mbuf_setnext(mb, mbuf_next(mbcur));
            error = mbuf_setnext(mbcur, mb);

        /* reduce current mbuf's length by "left" */
        mbuf_setlen(mbcur, mbuf_len(mbcur) - left);

        /*
         * update nmc's state to point at the end of the mbuf
         * where the needed data will be copied to.
         */
        nmc->nmc_mcur = mbcur = mb;
        nmc->nmc_ptr = (caddr_t)ptr;

        /* The rest of the data will fit in this mbuf. */

        /* the returned pointer will be the current pointer */
        *pptr = (u_char *)nmc->nmc_ptr;

        /*
         * update nmc's state to point at the end of the mbuf
         * where the needed data will be copied to.
         */
        nmc->nmc_ptr += left;

    /*
     * move the next "need" bytes into the current
     * mbuf from the mbufs that follow
     */

    /* extend current mbuf length */
    mbuf_setlen(mbcur, mbuf_len(mbcur) + need);

    /* mb follows mbufs we're copying/compacting data from */
    mb = mbuf_next(mbcur);

    /* copy as much as we need/can */
        mblen = mbuf_len(mb);
        cplen = MIN(mblen, need);
            bcopy(ptr, nmc->nmc_ptr, cplen);
            /*
             * update the mbuf's pointer and length to reflect that
             * the data was shifted to an earlier mbuf in the chain
             */
            error = mbuf_setdata(mb, ptr + cplen, mblen - cplen);
                mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

        /* update pointer/need */
        nmc->nmc_ptr += cplen;

        /* if more needed, go to next mbuf */

    /* did we run out of data in the mbuf chain? */
        mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

    /*
     * update nmc's state to point after this contiguous data
     *
     * "mb" points to the last mbuf we copied data from so we
     * just set nmc to point at whatever remains in that mbuf.
     */
    nmc->nmc_ptr = mbuf_data(mb);
    nmc->nmc_left = mbuf_len(mb);

    /* move past any padding */
        error = nfsm_chain_advance(nmc, padlen);
/*
 * nfsm_chain_get_opaque_f()
 *
 * Read the next "len" bytes in the chain into "buf".
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf)
    uint32_t cplen, padlen;

    padlen = nfsm_rndup(len) - len;

    /* loop through mbufs copying all the data we need */
    while (len && nmc->nmc_mcur) {
        /* copy as much as we need/can */
        cplen = MIN(nmc->nmc_left, len);
            bcopy(nmc->nmc_ptr, buf, cplen);
            nmc->nmc_ptr += cplen;
            nmc->nmc_left -= cplen;

        /* if more needed, go to next mbuf */
            mbuf_t mb = mbuf_next(nmc->nmc_mcur);
            nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
            nmc->nmc_left = mb ? mbuf_len(mb) : 0;

    /* did we run out of data in the mbuf chain? */

        nfsm_chain_adv(error, nmc, padlen);
/*
 * nfsm_chain_get_uio()
 *
 * Read the next "len" bytes in the chain into the given uio.
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio)
    uint32_t cplen, padlen;

    padlen = nfsm_rndup(len) - len;

    /* loop through mbufs copying all the data we need */
    while (len && nmc->nmc_mcur) {
        /* copy as much as we need/can */
        cplen = MIN(nmc->nmc_left, len);
            error = uiomove(nmc->nmc_ptr, cplen, uio);
            nmc->nmc_ptr += cplen;
            nmc->nmc_left -= cplen;

        /* if more needed, go to next mbuf */
            mbuf_t mb = mbuf_next(nmc->nmc_mcur);
            nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
            nmc->nmc_left = mb ? mbuf_len(mb) : 0;

    /* did we run out of data in the mbuf chain? */

        nfsm_chain_adv(error, nmc, padlen);
nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t slen)
    uint8_t smallbuf[64];
    uint8_t *nfcname = smallbuf;
    size_t buflen = sizeof(smallbuf), nfclen;

    error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
    if (error == ENAMETOOLONG) {
        MALLOC_ZONE(nfcname, uint8_t *, MAXPATHLEN, M_NAMEI, M_WAITOK);
            error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);

    /* if we got an error, just use the original string */
        nfsm_chain_add_string(error, nmc, s, slen);
        nfsm_chain_add_string(error, nmc, nfcname, nfclen);

    if (nfcname && (nfcname != smallbuf)) {
        FREE_ZONE(nfcname, MAXPATHLEN, M_NAMEI);
/*
 * Add an NFSv2 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_t szrdev)
    nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type,
        (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600)));
    nfsm_chain_add_32(error, nmc,
        VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1);
    nfsm_chain_add_32(error, nmc,
        VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1);
    nfsm_chain_add_32(error, nmc, szrdev);
    nfsm_chain_add_v2time(error, nmc,
        VATTR_IS_ACTIVE(vap, va_access_time) ?
        &vap->va_access_time : NULL);
    nfsm_chain_add_v2time(error, nmc,
        VATTR_IS_ACTIVE(vap, va_modify_time) ?
        &vap->va_modify_time : NULL);
/*
 * Add an NFSv3 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v3sattr_f(
    struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct vnode_attr *vap)
    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_mode);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_uid)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_uid);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_gid)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_gid);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_data_size)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_64(error, nmc, vap->va_data_size);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (vap->va_vaflags & VA_UTIMES_NULL) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
    if (VATTR_IS_ACTIVE(vap, va_access_time)) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
        nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_sec);
        nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec);
        nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
    if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
        nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_sec);
        nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec);
        nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
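#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * The NFSv3 sattr3 encoding above is a series of XDR discriminated unions:
 * each settable attribute is preceded by a TRUE/FALSE word saying whether a
 * value follows.  A hedged user-space sketch of that pattern for one 32-bit
 * field; encode_optional_u32() is a hypothetical flat-buffer stand-in for
 * the nfsm_chain_add_32() macro used in the kernel code.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static size_t
encode_optional_u32(uint8_t *buf, int present, uint32_t value)
{
    uint32_t net;
    size_t off = 0;

    net = htonl(present ? 1 : 0);           /* set_it discriminator */
    memcpy(buf + off, &net, 4); off += 4;
    if (present) {
        net = htonl(value);                 /* e.g. mode, uid or gid */
        memcpy(buf + off, &net, 4); off += 4;
    }
    return off;                             /* 4 or 8 bytes consumed */
}
#endif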
/*
 * nfsm_chain_get_fh_attr()
 *
 * Get the file handle and attributes from an mbuf chain. (NFSv2/v3)
 */
nfsm_chain_get_fh_attr(
    struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct nfs_vattr *nvap)
    int error = 0, gotfh, gotattr;

    if (nfsvers == NFS_VER3) { /* check for file handle */
        nfsm_chain_get_32(error, nmc, gotfh);
    if (!error && gotfh) { /* get file handle */
        nfsm_chain_get_fh(error, nmc, nfsvers, fhp);
    if (nfsvers == NFS_VER3) { /* check for file attributes */
        nfsm_chain_get_32(error, nmc, gotattr);
        if (!gotfh) { /* skip attributes */
            nfsm_chain_adv(error, nmc, NFSX_V3FATTR);
        } else { /* get attributes */
            error = nfs_parsefattr(nmp, nmc, nfsvers, nvap);
        /* we need valid attributes in order to call nfs_nget() */
        if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp)) {
/*
 * Get and process NFSv3 WCC data from an mbuf chain
 */
nfsm_chain_get_wcc_data_f(
    struct nfsm_chain *nmc,
    struct timespec *premtime,
    nfsm_chain_get_32(error, nmc, flag);
    if (!error && flag) {
        nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
        nfsm_chain_get_32(error, nmc, premtime->tv_sec);
        nfsm_chain_get_32(error, nmc, premtime->tv_nsec);
        nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
        premtime->tv_sec = 0;
        premtime->tv_nsec = 0;
    nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp);
/*
 * Get the next RPC transaction ID (XID)
 */
nfs_get_xid(uint64_t *xidp)
    lck_mtx_lock(nfs_request_mutex);
        /*
         * Derive initial xid from system time.
         *
         * Note: it's OK if this code inits nfs_xid to 0 (for example,
         * due to a broken clock) because we immediately increment it
         * and we guarantee to never use xid 0. So, nfs_xid should only
         * ever be 0 the first time this function is called.
         */
        nfs_xid = tv.tv_sec << 12;
    if (++nfs_xid == 0) {
        /* Skip zero xid if it should ever happen. */
    *xidp = nfs_xid + ((uint64_t)nfs_xidwrap << 32);
    lck_mtx_unlock(nfs_request_mutex);
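#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * A user-space sketch of the XID scheme above: seed a 32-bit counter from the
 * clock, never hand out zero, and fold a wrap counter into the upper 32 bits
 * of the value the caller sees.  The names xid_state and next_xid are
 * hypothetical; locking is omitted here.
 */
#include <stdint.h>
#include <time.h>

struct xid_state {
    uint32_t xid;       /* last 32-bit XID handed out */
    uint32_t xidwrap;   /* number of times xid has wrapped */
};

static uint64_t
next_xid(struct xid_state *st)
{
    if (st->xid == 0) {
        /* first call: derive an initial value from the clock */
        st->xid = (uint32_t)(time(NULL) << 12);
    }
    if (++st->xid == 0) {       /* skip zero and count the wrap */
        st->xid++;
        st->xidwrap++;
    }
    return st->xid + ((uint64_t)st->xidwrap << 32);
}
#endif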
/*
 * Build the RPC header and fill in the authorization info.
 * Returns the head of the mbuf list and the xid.
 */
    struct nfsmount *nmp = req->r_nmp;
    int nfsvers = nmp->nm_vers;
    int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum);

    return nfsm_rpchead2(nmp, nmp->nm_sotype, NFS_PROG, nfsvers, proc,
               req->r_auth, req->r_cred, req, mrest, xidp, mreqp);
/*
 * get_auxiliary_groups: Gets the supplementary groups from a credential.
 *
 * IN:  cred:   credential to get the associated groups from.
 * OUT: groups: An array of gids of NGROUPS size.
 * IN:  count:  The number of groups to get, i.e. the number of groups the server supports.
 *
 * returns: The number of groups found.
 *
 * Just a wrapper around kauth_cred_getgroups to handle the case of a server
 * supporting fewer than NGROUPS groups.
 */
get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count)
    int maxcount = count < NGROUPS ? count + 1 : NGROUPS;

    for (i = 0; i < NGROUPS; i++) {
        groups[i] = -2; /* Initialize to the nobody group */

    (void)kauth_cred_getgroups(cred, groups, &maxcount);

    /*
     * kauth_get_groups returns the primary group followed by the
     * user's auxiliary groups. If the number of groups the server supports
     * is less than NGROUPS, then we will drop the first group so that
     * we can send one more group over the wire.
     */
    if (count < NGROUPS) {
        pgid = kauth_cred_getgid(cred);
        if (pgid == groups[0]) {
            for (i = 0; i < maxcount; i++) {
                groups[i] = groups[i + 1];
nfsm_rpchead2(struct nfsmount *nmp, int sotype, int prog, int vers, int proc, int auth_type,
    kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp)
    int error, i, auth_len = 0, authsiz, reqlen;
    struct nfsm_chain nmreq;
    gid_t grouplist[NGROUPS];

    /* calculate expected auth length */
    switch (auth_type) {
        int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;

        groupcount = get_auxiliary_groups(cred, grouplist, count);
        if (groupcount < 0) {
        auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
        if (!req || !cred) {
        auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now
#endif /* CONFIG_NFS_GSS */
    authsiz = nfsm_rndup(auth_len);

    /* allocate the packet */
    headlen = authsiz + 10 * NFSX_UNSIGNED;
    if (sotype == SOCK_STREAM) { /* also include room for any RPC Record Mark */
        headlen += NFSX_UNSIGNED;
    if (headlen >= nfs_mbuf_minclsize) {
        error = mbuf_getpacket(MBUF_WAITOK, &mreq);
        error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
            if (headlen < nfs_mbuf_mhlen) {
                mbuf_align_32(mreq, headlen);
                mbuf_align_32(mreq, 8 * NFSX_UNSIGNED);
        /* unable to allocate packet */
        /* XXX should we keep statistics for these errors? */

    /*
     * If the caller gave us a non-zero XID then use it because
     * it may be a higher-level resend with a GSSAPI credential.
     * Otherwise, allocate a new one.
     */

    /* build the header(s) */
    nfsm_chain_init(&nmreq, mreq);

    /* First, if it's a TCP stream insert space for an RPC record mark */
    if (sotype == SOCK_STREAM) {
        nfsm_chain_add_32(error, &nmreq, 0);

    /* Then the RPC header. */
    nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff));
    nfsm_chain_add_32(error, &nmreq, RPC_CALL);
    nfsm_chain_add_32(error, &nmreq, RPC_VER2);
    nfsm_chain_add_32(error, &nmreq, prog);
    nfsm_chain_add_32(error, &nmreq, vers);
    nfsm_chain_add_32(error, &nmreq, proc);

    switch (auth_type) {
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* auth */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_build_done(error, &nmreq);
        /* Append the args mbufs */
            error = mbuf_setnext(nmreq.nmc_mcur, mrest);

        nfsm_chain_add_32(error, &nmreq, RPCAUTH_SYS);
        nfsm_chain_add_32(error, &nmreq, authsiz);
        nfsm_chain_add_32(error, &nmreq, 0);    /* stamp */
        nfsm_chain_add_32(error, &nmreq, 0);    /* zero-length hostname */
        nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred));      /* UID */
        nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred));      /* GID */
        nfsm_chain_add_32(error, &nmreq, groupcount);   /* additional GIDs */
        for (i = 0; i < groupcount; i++) {
            nfsm_chain_add_32(error, &nmreq, grouplist[i]);

        /* And the verifier... */
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_build_done(error, &nmreq);

        /* Append the args mbufs */
            error = mbuf_setnext(nmreq.nmc_mcur, mrest);

        error = nfs_gss_clnt_cred_put(req, &nmreq, mrest);
        if (error == ENEEDAUTH) {
            int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;

            /*
             * Use sec=sys for this user
             */
            req->r_auth = auth_type = RPCAUTH_SYS;
            groupcount = get_auxiliary_groups(cred, grouplist, count);
            if (groupcount < 0) {
            auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
            authsiz = nfsm_rndup(auth_len);
#endif /* CONFIG_NFS_GSS */

    /* finish setting up the packet */
    error = mbuf_pkthdr_setrcvif(mreq, 0);

    /* Calculate the size of the request */
    for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) {
        reqlen += mbuf_len(mb);
    mbuf_pkthdr_setlen(mreq, reqlen);

    /*
     * If the request goes on a TCP stream,
     * set its size in the RPC record mark.
     * The record mark count doesn't include itself
     * and the last fragment bit is set.
     */
    if (sotype == SOCK_STREAM) {
        nfsm_chain_set_recmark(error, &nmreq,
            (reqlen - NFSX_UNSIGNED) | 0x80000000);
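#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * The words emitted by nfsm_rpchead2() above follow the ONC RPC call message
 * layout (RFC 5531).  A hedged sketch of that layout as a flat structure; the
 * AUTH_SYS credential body and the verifier are omitted, and all fields go on
 * the wire in network byte order.
 */
#include <stdint.h>

struct rpc_call_header_sketch {
    uint32_t xid;           /* transaction ID */
    uint32_t mtype;         /* 0 = CALL (RPC_CALL) */
    uint32_t rpcvers;       /* 2 (RPC_VER2) */
    uint32_t prog;          /* 100003 for NFS (NFS_PROG) */
    uint32_t vers;          /* NFS version: 2, 3, ... */
    uint32_t proc;          /* procedure number */
    uint32_t cred_flavor;   /* e.g. AUTH_NONE or AUTH_SYS */
    uint32_t cred_len;      /* length of the credential body that follows */
    /* credential body, then verifier flavor/length, then the call args */
};
#endif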
/*
 * Parse an NFS file attribute structure out of an mbuf chain.
 */
    struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct nfs_vattr *nvap)
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TYPE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_MODE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SIZE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SPACE_USED);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_RAWDEV);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FSID);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FILEID);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_ACCESS);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_METADATA);

    nfsm_chain_get_32(error, nmc, nvtype);
    nfsm_chain_get_32(error, nmc, vmode);

    if (nfsvers == NFS_VER3) {
        nvap->nva_type = vtype = nfstov_type(nvtype, nfsvers);
        /*
         * The duplicate information returned in fa_type and fa_mode
         * is an ambiguity in the NFS version 2 protocol.
         *
         * VREG should be taken literally as a regular file.  If a
         * server intends to return some type information differently
         * in the upper bits of the mode field (e.g. for sockets, or
         * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
         * leave the examination of the mode bits even in the VREG
         * case to avoid breakage for bogus servers, but we make sure
         * that there are actually type bits set in the upper part of
         * fa_mode (and failing that, trust the va_type field).
         *
         * NFSv3 cleared the issue, and requires fa_mode to not
         * contain any type information (while also introducing
         * sockets and FIFOs for fa_type).
         */
        vtype = nfstov_type(nvtype, nfsvers);
        if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) {
            vtype = IFTOVT(vmode);
        nvap->nva_type = vtype;

    nvap->nva_mode = (vmode & 07777);

    nfsm_chain_get_32(error, nmc, nvap->nva_nlink);
    nfsm_chain_get_32(error, nmc, nvap->nva_uid);
    nfsm_chain_get_32(error, nmc, nvap->nva_gid);

    if (nfsvers == NFS_VER3) {
        nfsm_chain_get_64(error, nmc, nvap->nva_size);
        nfsm_chain_get_64(error, nmc, nvap->nva_bytes);
        nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata1);
        nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata2);
        nfsm_chain_get_64(error, nmc, nvap->nva_fsid.major);
        nvap->nva_fsid.minor = 0;
        nfsm_chain_get_64(error, nmc, nvap->nva_fileid);
        nfsm_chain_get_32(error, nmc, nvap->nva_size);
        nfsm_chain_adv(error, nmc, NFSX_UNSIGNED);
        nfsm_chain_get_32(error, nmc, rdev);
        nvap->nva_rawdev.specdata1 = major(rdev);
        nvap->nva_rawdev.specdata2 = minor(rdev);
        nfsm_chain_get_32(error, nmc, val); /* blocks */
        nvap->nva_bytes = val * NFS_FABLKSIZE;
        nfsm_chain_get_32(error, nmc, val);
        nvap->nva_fsid.major = (uint64_t)val;
        nvap->nva_fsid.minor = 0;
        nfsm_chain_get_32(error, nmc, val);
        nvap->nva_fileid = (uint64_t)val;
        /* Really ugly NFSv2 kludge. */
        if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) {
            nvap->nva_type = VFIFO;
    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_ACCESS],
        nvap->nva_timensec[NFSTIME_ACCESS]);
    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_MODIFY],
        nvap->nva_timensec[NFSTIME_MODIFY]);
    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_CHANGE],
        nvap->nva_timensec[NFSTIME_CHANGE]);
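#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * A user-space sketch of the NFSv2 fa_type/fa_mode disambiguation described
 * in the comment above: trust fa_type unless it is VNON (or VREG with type
 * bits present in fa_mode), in which case derive the type from the mode's
 * S_IFMT bits.  The sketch_vtype names are hypothetical stand-ins for the
 * kernel's vnode types.
 */
#include <sys/stat.h>

enum sketch_vtype { SK_VNON, SK_VREG, SK_VDIR, SK_VCHR, SK_VFIFO, SK_VOTHER };

static enum sketch_vtype
mode_to_vtype(mode_t m)
{
    switch (m & S_IFMT) {
    case S_IFREG:   return SK_VREG;
    case S_IFDIR:   return SK_VDIR;
    case S_IFCHR:   return SK_VCHR;
    case S_IFIFO:   return SK_VFIFO;
    default:        return SK_VOTHER;
    }
}

static enum sketch_vtype
v2_resolve_type(enum sketch_vtype fa_type, mode_t fa_mode)
{
    if ((fa_type == SK_VNON) ||
        ((fa_type == SK_VREG) && ((fa_mode & S_IFMT) != 0))) {
        return mode_to_vtype(fa_mode);
    }
    return fa_type;
}
#endif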
/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the value pointed to by nvap, unless the file type in the attribute
 * cache doesn't match the file type in the nvap, in which case log a
 * warning and return ESTALE.
 *
 * If the dontshrink flag is set, then it's not safe to call ubc_setsize()
 * to shrink the size of the file.
 */
    struct nfs_vattr *nvap,
    struct nfs_vattr *npnvap;
    int xattr = np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR;
    int referral = np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL;
    int aclbit, monitored, error = 0;
    struct nfsmount *nmp;
    uint32_t events = np->n_events;

    if (np->n_hflag & NHINIT) {
        mp = vnode_mount(vp);

    monitored = vp ? vnode_ismonitored(vp) : 0;

    FSDBG_TOP(527, np, vp, *xidp >> 32, *xidp);

    if (!((nmp = VFSTONFS(mp)))) {
        FSDBG_BOT(527, ENXIO, 1, 0, *xidp);

    if (*xidp < np->n_xid) {
        /*
         * We have already updated attributes with a response from
         * a later request. The attributes we have here are probably
         * stale so we drop them (just return). However, our
         * out-of-order receipt could be correct - if the requests were
         * processed out of order at the server. Given the uncertainty
         * we invalidate our cached attributes. *xidp is zeroed here
         * to indicate the attributes were dropped - only getattr
         * cares - it needs to retry the rpc.
         */
        NATTRINVALIDATE(np);
        FSDBG_BOT(527, 0, np, np->n_xid, *xidp);

    if (vp && (nvap->nva_type != vnode_vtype(vp))) {
        /*
         * The filehandle has changed type on us.  This can be
         * caused by either the server not having unique filehandles
         * or because another client has removed the previous
         * filehandle and a new object (of a different type)
         * has been created with the same filehandle.
         *
         * We can't simply switch the type on the vnode because
         * there may be type-specific fields that need to be
         * cleaned up or set up.
         *
         * So, what should we do with this vnode?
         *
         * About the best we can do is log a warning and return
         * an error.  ESTALE is about the closest error, but it
         * is a little strange that we come up with this error
         * internally instead of simply passing it through from
         * the server.  Hopefully, the vnode will be reclaimed
         * soon so the filehandle can be reincarnated as the new
         * object.
         */
        printf("nfs loadattrcache vnode changed type, was %d now %d\n",
            vnode_vtype(vp), nvap->nva_type);
            events |= VNODE_EVENT_DELETE;

    npnvap = &np->n_vattr;

    /*
     * The ACL cache needs special handling because it is not
     * always updated.  Save current ACL cache state so it can
     * be restored after copying the new attributes into place.
     */
    aclbit = NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL);
    acl = npnvap->nva_acl;

    /*
     * For monitored nodes, check for attribute changes that should generate events.
     */
        if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS) &&
            (nvap->nva_nlink != npnvap->nva_nlink)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_LINK;
        if (events & VNODE_EVENT_PERMS) {
            /* no need to do all the checking if it's already set */;
        } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) &&
            (nvap->nva_mode != npnvap->nva_mode)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
            (nvap->nva_uid != npnvap->nva_uid)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
            (nvap->nva_gid != npnvap->nva_gid)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        } else if (nmp->nm_vers >= NFS_VER4) {
            if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
                !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) {
                events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
            } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
                !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) {
                events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
            } else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) &&
                nvap->nva_acl && npnvap->nva_acl &&
                ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) ||
                bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) {
                events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
            ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_change != npnvap->nva_change)) ||
            (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) &&
            ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) ||
            (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_RAWDEV) &&
            ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) ||
            (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) {
            events |= VNODE_EVENT_ATTRIB;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_FILEID) &&
            (nvap->nva_fileid != npnvap->nva_fileid)) {
            events |= VNODE_EVENT_ATTRIB;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ARCHIVE) &&
            ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) {
            events |= VNODE_EVENT_ATTRIB;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_HIDDEN) &&
            ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) {
            events |= VNODE_EVENT_ATTRIB;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_CREATE) &&
            ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) ||
            (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) {
            events |= VNODE_EVENT_ATTRIB;
        if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) &&
            ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) ||
            (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) {
            events |= VNODE_EVENT_ATTRIB;

    /* Copy the attributes to the attribute cache */
    bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap));

    np->n_attrstamp = now.tv_sec;

    /* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... */
        nvap->nva_flags |= xattr;
    if (vp && referral) {
        nvap->nva_flags |= referral;

    if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) {
        /* we're updating the ACL */
        if (nvap->nva_acl) {
            /* make a copy of the acl for the cache */
            npnvap->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
            if (npnvap->nva_acl) {
                bcopy(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
                /* can't make a copy to cache, invalidate ACL cache */
                NFS_BITMAP_CLR(npnvap->nva_bitmap, NFS_FATTR_ACL);
            kauth_acl_free(acl);
    if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) {
        /* update the ACL timestamp */
        np->n_aclstamp = now.tv_sec;
        /* we aren't updating the ACL, so restore original values */
            NFS_BITMAP_SET(npnvap->nva_bitmap, NFS_FATTR_ACL);
        npnvap->nva_acl = acl;

    /*
     * For NFSv4, if the fsid doesn't match the fsid for the mount, then
     * this node is for a different file system on the server.  So we mark
     * this node as a trigger node that will trigger the mirror mount.
     */
    if ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_type == VDIR) &&
        ((np->n_vattr.nva_fsid.major != nmp->nm_fsid.major) ||
        (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) {
        np->n_vattr.nva_flags |= NFS_FFLAG_TRIGGER;
#endif /* CONFIG_NFS4 */
#endif /* CONFIG_TRIGGERS */

    if (!vp || (nvap->nva_type != VREG)) {
        np->n_size = nvap->nva_size;
    } else if (nvap->nva_size != np->n_size) {
        FSDBG(527, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
        if (!UBCINFOEXISTS(vp) || (dontshrink && (nvap->nva_size < np->n_size))) {
            /* asked not to shrink, so stick with current size */
            FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
            nvap->nva_size = np->n_size;
            NATTRINVALIDATE(np);
        } else if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
            /* if we've modified, stick with larger size */
            FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
            nvap->nva_size = np->n_size;
            npnvap->nva_size = np->n_size;
            /*
             * n_size is protected by the data lock, so we need to
             * defer updating it until it's safe.  We save the new size
             * and set a flag and it'll get updated the next time we get/drop
             * the data lock or the next time we do a getattr.
             */
            np->n_newsize = nvap->nva_size;
            SET(np->n_flag, NUPDATESIZE);
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_EXTEND;

    if (np->n_flag & NCHG) {
        if (np->n_flag & NACC) {
            nvap->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
            nvap->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
        if (np->n_flag & NUPD) {
            nvap->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
            nvap->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;

    if (monitored && events) {
        nfs_vnode_notify(np, events);

    FSDBG_BOT(527, error, np, np->n_size, *xidp);
/*
 * Calculate the attribute timeout based on
 * how recently the file has been modified.
 */
nfs_attrcachetimeout(nfsnode_t np)
    struct nfsmount *nmp;

    if (nfs_mount_gone(nmp)) {

    isdir = vnode_isdir(NFSTOV(np));

    if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
        /* If we have a delegation, we always use the max timeout. */
        timeo = isdir ? nmp->nm_acdirmax : nmp->nm_acregmax;
    if ((np)->n_flag & NMODIFIED) {
        /* If we have modifications, we always use the min timeout. */
        timeo = isdir ? nmp->nm_acdirmin : nmp->nm_acregmin;
        /* Otherwise, we base the timeout on how old the file seems. */
        /* Note that if the client and server clocks are way out of sync, */
        /* timeout will probably get clamped to a min or max value */
        timeo = (now.tv_sec - (np)->n_vattr.nva_timesec[NFSTIME_MODIFY]) / 10;
            if (timeo < nmp->nm_acdirmin) {
                timeo = nmp->nm_acdirmin;
            } else if (timeo > nmp->nm_acdirmax) {
                timeo = nmp->nm_acdirmax;
            if (timeo < nmp->nm_acregmin) {
                timeo = nmp->nm_acregmin;
            } else if (timeo > nmp->nm_acregmax) {
                timeo = nmp->nm_acregmax;
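#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * A user-space sketch of the clamping done by nfs_attrcachetimeout(): base
 * the timeout on the apparent age of the file (age / 10) and clamp it to the
 * mount's configured min/max range.  The parameter names here are
 * hypothetical stand-ins for nm_acregmin/nm_acregmax (or the acdir variants).
 */
#include <time.h>

static long
attr_timeout_sketch(time_t now, time_t mtime, long acmin, long acmax)
{
    long timeo = (long)((now - mtime) / 10);

    if (timeo < acmin) {
        timeo = acmin;
    } else if (timeo > acmax) {
        timeo = acmax;
    }
    return timeo;
}
#endif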
/*
 * Check the attribute cache time stamp.
 * If the cache is valid, copy contents to *nvaper and return 0
 * otherwise return an error.
 * Must be called with the node locked.
 */
nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags)
    struct nfs_vattr *nvap;
    struct timeval nowup;
    struct nfsmount *nmp;

    /* Check if the attributes are valid. */
    if (!NATTRVALID(np) || ((flags & NGA_ACL) && !NACLVALID(np))) {
        FSDBG(528, np, 0, 0xffffff01, ENOENT);
        OSAddAtomic64(1, &nfsstats.attrcache_misses);

    if (nfs_mount_gone(nmp)) {

    /*
     * Verify the cached attributes haven't timed out.
     * If the server isn't responding, skip the check
     * and return cached attributes.
     */
    if (!nfs_use_cache(nmp)) {
        microuptime(&nowup);
        if (np->n_attrstamp > nowup.tv_sec) {
            printf("NFS: Attribute time stamp is in the future by %ld seconds. Invalidating cache\n",
                np->n_attrstamp - nowup.tv_sec);
            NATTRINVALIDATE(np);
            NACCESSINVALIDATE(np);
        timeo = nfs_attrcachetimeout(np);
        if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
            FSDBG(528, np, 0, 0xffffff02, ENOENT);
            OSAddAtomic64(1, &nfsstats.attrcache_misses);
        if ((flags & NGA_ACL) && ((nowup.tv_sec - np->n_aclstamp) >= timeo)) {
            FSDBG(528, np, 0, 0xffffff02, ENOENT);
            OSAddAtomic64(1, &nfsstats.attrcache_misses);

    nvap = &np->n_vattr;
    FSDBG(528, np, nvap->nva_size, np->n_size, 0xcace);
    OSAddAtomic64(1, &nfsstats.attrcache_hits);

    if (nvap->nva_type != VREG) {
        np->n_size = nvap->nva_size;
    } else if (nvap->nva_size != np->n_size) {
        FSDBG(528, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
        if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
            /* if we've modified, stick with larger size */
            nvap->nva_size = np->n_size;
            /*
             * n_size is protected by the data lock, so we need to
             * defer updating it until it's safe.  We save the new size
             * and set a flag and it'll get updated the next time we get/drop
             * the data lock or the next time we do a getattr.
             */
            np->n_newsize = nvap->nva_size;
            SET(np->n_flag, NUPDATESIZE);

    bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
    if (np->n_flag & NCHG) {
        if (np->n_flag & NACC) {
            nvaper->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
            nvaper->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
        if (np->n_flag & NUPD) {
            nvaper->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
            nvaper->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
    if (nvap->nva_acl) {
        if (flags & NGA_ACL) {
            nvaper->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
            if (!nvaper->nva_acl) {
            bcopy(nvap->nva_acl, nvaper->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
            nvaper->nva_acl = NULL;
/*
 * When creating file system objects:
 * Don't bother setting UID if it's the same as the credential performing the create.
 * Don't bother setting GID if it's the same as the directory or credential.
 */
nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
    if (VATTR_IS_ACTIVE(vap, va_uid)) {
        if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid) {
            VATTR_CLEAR_ACTIVE(vap, va_uid);
            VATTR_CLEAR_ACTIVE(vap, va_uuuid);
    if (VATTR_IS_ACTIVE(vap, va_gid)) {
        if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
            (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid)) {
            VATTR_CLEAR_ACTIVE(vap, va_gid);
            VATTR_CLEAR_ACTIVE(vap, va_guuid);
/*
 * Convert a universal address string to a sockaddr structure.
 *
 * Universal addresses can be in the following formats:
 *
 * d = decimal (IPv4)
 * x = hexadecimal (IPv6)
 * p = port (decimal)
 *
 * d.d.d.d
 * d.d.d.d.p.p
 * x:x:x:x:x:x:x:x
 * x:x:x:x:x:x:x:x.p.p
 * x:x:x:x:x:x:d.d.d.d
 * x:x:x:x:x:x:d.d.d.d.p.p
 *
 * IPv6 strings can also have a series of zeroes elided
 * IPv6 strings can also have a %scope suffix at the end (after any port)
 *
 * rules & exceptions:
 * - value before : is hex
 * - value before . is dec
 * - once . hit, all values are dec
 * - hex+port case means value before first dot is actually hex
 * - . is always preceded by digits except if last hex was double-colon
 *
 * scan, converting #s to bytes
 * first time a . is encountered, scan the rest to count them.
 * 2 dots = just port
 * 3 dots = just IPv4 no port
 * 5 dots = IPv4 and port
 */
#define IS_DIGIT(C) \
	(((C) >= '0') && ((C) <= '9'))

#define IS_XDIGIT(C) \
	(IS_DIGIT(C) || \
	(((C) >= 'A') && ((C) <= 'F')) || \
	(((C) >= 'a') && ((C) <= 'f')))
nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr)
    const char *p, *pd;     /* pointers to current character in scan */
    const char *pnum;       /* pointer to current number to decode */
    const char *pscope;     /* pointer to IPv6 scope ID */
    uint8_t a[18];          /* octet array to store address bytes */
    int i;                  /* index of next octet to decode */
    int dci;                /* index of octet to insert double-colon zeroes */
    int dcount, xdcount;    /* count of digits in current number */
    int needmore;           /* set when we know we need more input (e.g. after colon, period) */
    int dots;               /* # of dots */
    int hex;                /* contains hex values */
    unsigned long val;      /* decoded value */
    int s;                  /* index used for sliding array to insert elided zeroes */

    /* AF_LOCAL address are paths that start with '/' or are empty */
    if (*uaddr == '/' || *uaddr == '\0') { /* AF_LOCAL address */
        struct sockaddr_un *sun = (struct sockaddr_un *)addr;
        sun->sun_family = AF_LOCAL;
        sun->sun_len = sizeof(struct sockaddr_un);
        strlcpy(sun->sun_path, uaddr, sizeof(sun->sun_path));

#define DECIMALVALUE 1
	if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \
	if (((TYPE) == DECIMALVALUE) && xdcount) \
	val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 10 : 16); \
	if (((TYPE) == DECIMALVALUE) && (val >= 256)) \
	/* check if there is room left in the array */ \
	if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \
	if ((TYPE) == HEXVALUE) \
		a[i++] = ((val >> 8) & 0xff); \
	a[i++] = (val & 0xff); \

    i = dcount = xdcount = 0;

    if ((*p == ':') && (*++p != ':')) { /* if it starts with colon, gotta be a double */

        if (IS_XDIGIT(*p)) {
            if (!IS_DIGIT(*p)) {
        } else if (*p == '.') {
            /* rest is decimal IPv4 dotted quad and/or port */
                /* this is the first, so count them */
                for (pd = p; *pd; pd++) {
                    } else if (hex && (*pd == '%')) {
                    } else if ((*pd < '0') || (*pd > '9')) {
                if ((dots != 2) && (dots != 3) && (dots != 5)) {
            if (hex && (dots == 2)) { /* hex+port */
                if (!dcount && needmore) {
                if (dcount) { /* last hex may be elided zero */
            dcount = xdcount = 0;
        } else if (*p == ':') {
            if (!dcount) { /* missing number, probably double colon */
                if (dci >= 0) { /* can only have one double colon */
            dcount = xdcount = 0;
        } else if (*p == '%') { /* scope ID delimiter */
        } else { /* unexpected character */

    if (needmore && !dcount) {
    if (dcount) { /* decode trailing number */
        GET(dots ? DECIMALVALUE : HEXVALUE);
    if (dci >= 0) { /* got a double-colon at i, need to insert a range of zeroes */
        /* if we got a port, slide to end of array */
        /* otherwise, slide to end of address (non-port) values */
        int end = ((dots == 2) || (dots == 5)) ? sizeof(a) : (sizeof(a) - 2);
        if (i % 2) { /* length of zero range must be multiple of 2 */
        if (i >= end) { /* no room? */
        /* slide (i-dci) numbers up from index dci */
        for (s = 0; s < (i - dci); s++) {
            a[end - 1 - s] = a[i - 1 - s];
        /* zero (end-i) numbers at index dci */
        for (s = 0; s < (end - i); s++) {

    /* copy out resulting socket address */
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
        if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a) - 2)))) {
        if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) {
        bzero(sin6, sizeof(struct sockaddr_in6));
        sin6->sin6_len = sizeof(struct sockaddr_in6);
        sin6->sin6_family = AF_INET6;
        bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr));
        if ((dots == 5) || (dots == 2)) {
            sin6->sin6_port = htons((a[16] << 8) | a[17]);
            for (p = pscope; IS_DIGIT(*p); p++) {
            if (*p && !IS_DIGIT(*p)) { /* name */
                ifnet_t interface = NULL;
                if (ifnet_find_by_name(pscope, &interface) == 0) {
                    sin6->sin6_scope_id = ifnet_index(interface);
                    ifnet_release(interface);
            } else { /* decimal number */
                sin6->sin6_scope_id = strtoul(pscope, NULL, 10);
                /* XXX should we also embed scope id for linklocal? */
        struct sockaddr_in *sin = (struct sockaddr_in *)addr;
        if ((dots != 3) && (dots != 5)) {
        if ((dots == 3) && (i != 4)) {
        if ((dots == 5) && (i != 6)) {
        bzero(sin, sizeof(struct sockaddr_in));
        sin->sin_len = sizeof(struct sockaddr_in);
        sin->sin_family = AF_INET;
        bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr));
        sin->sin_port = htons((a[4] << 8) | a[5]);
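#if 0   /* Illustrative sketch (example only), not part of the build. */
/*
 * A hedged sketch of how a caller in this file's context might use
 * nfs_uaddr2sockaddr().  The caller supplies storage large enough for any
 * address family; the exact return convention is as implemented by the
 * function above, and the address strings here are made-up examples.
 */
static void
uaddr_examples(void)
{
    struct sockaddr_storage ss;

    /* IPv4 with port: "d.d.d.d.p.p" -> AF_INET, port = (p << 8) | p */
    nfs_uaddr2sockaddr("10.0.0.1.8.1", (struct sockaddr *)&ss);

    /* IPv6 with elided zeroes and a %scope suffix -> AF_INET6 */
    nfs_uaddr2sockaddr("fe80::1%en0", (struct sockaddr *)&ss);

    /* path starting with '/' -> AF_LOCAL */
    nfs_uaddr2sockaddr("/var/run/nfs.sock", (struct sockaddr *)&ss);
}
#endif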
/* NFS Client debugging support */
uint32_t nfs_debug_ctl;

#include <libkern/libkern.h>

nfs_printf(unsigned int facility, unsigned int level, const char *fmt, ...)
    if (NFS_IS_DBG(facility, level)) {

#define DISPLAYLEN 16

    return ch >= 0x20 && ch <= 0x7e;

hexdump(void *data, size_t len)
    unsigned char *d = data;
    char *p, disbuf[3 * DISPLAYLEN + 1];

    for (i = 0; i < len; i += DISPLAYLEN) {
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
            snprintf(p, 4, "%2.2x ", d[i + j]);
        for (; j < DISPLAYLEN; j++, p += 3) {
            snprintf(p, 4, "   ");
        printf("%s  ", disbuf);
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p++) {
            snprintf(p, 2, "%c", isprint(d[i + j]) ? d[i + j] : '.');
        printf("%s\n", disbuf);

nfs_dump_mbuf(const char *func, int lineno, const char *msg, mbuf_t mb)
    printf("%s:%d %s\n", func, lineno, msg);
    for (m = mb; m; m = mbuf_next(m)) {
        hexdump(mbuf_data(m), mbuf_len(m));
/* Is a mount gone away? */
nfs_mount_gone(struct nfsmount *nmp)
    return !nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD));
/*
 * Return some of the more significant mount options
 * as a string, e.g. "ro,hard,intr,tcp,vers=3,sec=krb5,deadtimeout=0"
 */
nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen)
    c = snprintf(buf, buflen, "%s,%s,%s,%s,vers=%d,sec=%s,%sdeadtimeout=%d",
        (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw",
        NMFLAG(nmp, SOFT) ? "soft" : "hard",
        NMFLAG(nmp, INTR) ? "intr" : "nointr",
        nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp",
        nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" :
        nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" :
        nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" :
        nmp->nm_auth == RPCAUTH_SYS ? "sys" : "none",
        nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," :
        nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," :
        nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "",
        nmp->nm_deadtimeout);

    return c > buflen ? ENOMEM : 0;
#endif /* NFSCLIENT */
/*
 * Schedule a callout thread to run an NFS timer function
 * interval milliseconds in the future.
 */
nfs_interval_timer_start(thread_call_t call, int interval)
    clock_interval_to_deadline(interval, 1000 * 1000, &deadline);
    thread_call_enter_delayed(call, deadline);
int nfsrv_cmp_secflavs(struct nfs_sec *, struct nfs_sec *);
int nfsrv_hang_addrlist(struct nfs_export *, struct user_nfs_export_args *);
int nfsrv_free_netopt(struct radix_node *, void *);
int nfsrv_free_addrlist(struct nfs_export *, struct user_nfs_export_args *);
struct nfs_export_options *nfsrv_export_lookup(struct nfs_export *, mbuf_t);
struct nfs_export *nfsrv_fhtoexport(struct nfs_filehandle *);
struct nfs_user_stat_node *nfsrv_get_user_stat_node(struct nfs_active_user_list *, struct sockaddr *, uid_t);
void nfsrv_init_user_list(struct nfs_active_user_list *);
void nfsrv_free_user_list(struct nfs_active_user_list *);
/*
 * add NFSv3 WCC data to an mbuf chain
 */
nfsm_chain_add_wcc_data_f(
    struct nfsrv_descript *nd,
    struct nfsm_chain *nmc,
    struct vnode_attr *prevap,
    struct vnode_attr *postvap)
        nfsm_chain_add_32(error, nmc, FALSE);
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_64(error, nmc, prevap->va_data_size);
        nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_modify_time);
        nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_change_time);
    nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap);
/*
 * Extract a lookup path from the given mbufs and store it in
 * a newly allocated buffer saved in the given nameidata structure.
 */
nfsm_chain_get_path_namei(
    struct nfsm_chain *nmc,
    struct nameidata *nip)
    struct componentname *cnp = &nip->ni_cnd;

    if (len > (MAXPATHLEN - 1)) {
        return ENAMETOOLONG;

    /*
     * Get a buffer for the name to be translated, and copy the
     * name into the buffer.
     */
    MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
    if (!cnp->cn_pnbuf) {
    cnp->cn_pnlen = MAXPATHLEN;
    cnp->cn_flags |= HASBUF;

    /* Copy the name from the mbuf list to the string */
    nfsm_chain_get_opaque(error, nmc, len, cp);
    cnp->cn_pnbuf[len] = '\0';

    /* sanity check the string */
    if ((strlen(cp) != len) || strchr(cp, '/')) {
        if (cnp->cn_pnbuf) {
            FREE_ZONE(cnp->cn_pnbuf, MAXPATHLEN, M_NAMEI);
        cnp->cn_flags &= ~HASBUF;
    nip->ni_pathlen = len;
/*
 * Set up nameidata for a lookup() call and do it.
 */
    struct nfsrv_descript *nd,
    struct nameidata *nip,
    struct nfs_filehandle *nfhp,
    struct nfs_export **nxp,
    struct nfs_export_options **nxop)
    struct componentname *cnp = &nip->ni_cnd;

    /*
     * Extract and set starting directory.
     */
    error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop);
    error = nfsrv_credcheck(nd, ctx, *nxp, *nxop);
    if (error || (vnode_vtype(dp) != VDIR)) {

    nip->ni_cnd.cn_context = ctx;

    if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) {
        cnp->cn_flags |= RDONLY;

    cnp->cn_flags |= NOCROSSMOUNT;
    cnp->cn_nameptr = cnp->cn_pnbuf;
    nip->ni_usedvp = nip->ni_startdir = dp;
    nip->ni_rootdir = rootvnode;

    /*
     * And call lookup() to do the real work
     */
    cnflags = nip->ni_cnd.cn_flags; /* store in case we have to restore */
    while ((error = lookup(nip)) == ERECYCLE) {
        nip->ni_cnd.cn_flags = cnflags;
        cnp->cn_nameptr = cnp->cn_pnbuf;
        nip->ni_usedvp = nip->ni_dvp = nip->ni_startdir = dp;

    /* Check for encountering a symbolic link */
    if (cnp->cn_flags & ISSYMLINK) {
        if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) {
            vnode_put(nip->ni_dvp);
            vnode_put(nip->ni_vp);

    tmppn = cnp->cn_pnbuf;
    cnp->cn_pnbuf = NULL;
    cnp->cn_flags &= ~HASBUF;
    FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI);
/*
 * A fiddled version of m_adj() that ensures null fill to a 4-byte
 * boundary and only trims off the back end
 */
void
nfsm_adj(mbuf_t mp, int len, int nul)
{
	mbuf_t m, mnext;
	int count, i, mlen;
	char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		mlen = mbuf_len(m);
		count += mlen;
		mnext = mbuf_next(m);
		if (mnext == NULL) {
			break;
		}
		m = mnext;
	}
	if (mlen > len) {
		mlen -= len;
		mbuf_setlen(m, mlen);
		if (nul > 0) {
			cp = (caddr_t)mbuf_data(m) + mlen - nul;
			for (i = 0; i < nul; i++) {
				*cp++ = '\0';
			}
		}
		return;
	}
	count -= len;
	if (count < 0) {
		count = 0;
	}
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = mbuf_next(m)) {
		mlen = mbuf_len(m);
		if (mlen >= count) {
			mlen = count;
			mbuf_setlen(m, count);
			if (nul > 0) {
				cp = (caddr_t)mbuf_data(m) + mlen - nul;
				for (i = 0; i < nul; i++) {
					*cp++ = '\0';
				}
			}
			break;
		}
		count -= mlen;
	}
	for (m = mbuf_next(m); m; m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}
}
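/*
 * Usage note: callers pass nul as the number of XDR pad bytes that end up at
 * the new tail of the chain (XDR opaques are zero-padded to a 4-byte
 * boundary), so after trimming len bytes off the back the pad bytes are
 * explicitly re-zeroed rather than left holding stale data.
 */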
/*
 * Trim the header out of the mbuf list and trim off any trailing
 * junk so that the mbuf list has only the write data.
 */
int
nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
{
	int cnt = 0, dlen, adjust;
	caddr_t data;
	mbuf_t m;

	if (mlen) {
		*mlen = 0;
	}

	/* trim header */
	for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}

	/* trim current mbuf */
	data = mbuf_data(m);
	dlen = mbuf_len(m);
	adjust = nmc->nmc_ptr - data;
	dlen -= adjust;
	if ((dlen > 0) && (adjust > 0)) {
		if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) {
			return EIO;
		}
	} else {
		mbuf_setlen(m, dlen);
	}

	/* skip next len bytes */
	for (; m && (cnt < len); m = mbuf_next(m)) {
		dlen = mbuf_len(m);
		cnt += dlen;
		if (cnt > len) {
			/* truncate to end of data */
			mbuf_setlen(m, dlen - (cnt - len));
			if (m == nmc->nmc_mcur) {
				nmc->nmc_left -= (cnt - len);
			}
		}
	}

	/* trim any trailing data */
	if (m == nmc->nmc_mcur) {
		nmc->nmc_left = 0;
	}
	for (; m; m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}

	if (mlen) {
		*mlen = cnt;
	}
	return 0;
}
int
nfsm_chain_add_fattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
{
	int error = 0;

	// XXX Should we assert here that all fields are supported?

	nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
	} else {
		nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
	}
	nfsm_chain_add_32(error, nmc, vap->va_nlink);
	nfsm_chain_add_32(error, nmc, vap->va_uid);
	nfsm_chain_add_32(error, nmc, vap->va_gid);
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_64(error, nmc, vap->va_data_size);
		nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
		nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
		nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
		nfsm_chain_add_64(error, nmc, vap->va_fsid);
		nfsm_chain_add_64(error, nmc, vap->va_fileid);
	} else {
		nfsm_chain_add_32(error, nmc, vap->va_data_size);
		nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
		if (vap->va_type == VFIFO) {
			nfsm_chain_add_32(error, nmc, 0xffffffff);
		} else {
			nfsm_chain_add_32(error, nmc, vap->va_rdev);
		}
		nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
		nfsm_chain_add_32(error, nmc, vap->va_fsid);
		nfsm_chain_add_32(error, nmc, vap->va_fileid);
	}
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);

	return error;
}
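/*
 * NFSv2 differences handled above: the v2 fattr carries only 32-bit size,
 * blocks, fsid and fileid fields, reports blocks in NFS_FABLKSIZE units, and
 * has no major/minor split for rdev; FIFOs are conventionally reported with
 * an rdev of 0xffffffff.
 */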
int
nfsm_chain_get_sattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
{
	int error = 0;
	uint32_t val = 0;
	uint64_t val64 = 0;
	struct timespec now;

	if (nd->nd_vers == NFS_VER2) {
		/*
		 * There is/was a bug in the Sun client that puts 0xffff in the mode
		 * field of sattr when it should put in 0xffffffff. The u_short
		 * doesn't sign extend. So check the low order 2 bytes for 0xffff.
		 */
		nfsm_chain_get_32(error, nmc, val);
		if ((val & 0xffff) != 0xffff) {
			VATTR_SET(vap, va_mode, val & 07777);
			/* save the "type" bits for NFSv2 create */
			VATTR_SET(vap, va_type, IFTOVT(val));
			VATTR_CLEAR_ACTIVE(vap, va_type);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1) {
			VATTR_SET(vap, va_uid, val);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1) {
			VATTR_SET(vap, va_gid, val);
		}
		/* save the "size" bits for NFSv2 create (even if they appear unset) */
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_data_size, val);
		if (val == (uint32_t)-1) {
			VATTR_CLEAR_ACTIVE(vap, va_data_size);
		}
		nfsm_chain_get_time(error, nmc, NFS_VER2,
		    vap->va_access_time.tv_sec,
		    vap->va_access_time.tv_nsec);
		if (vap->va_access_time.tv_sec != -1) {
			VATTR_SET_ACTIVE(vap, va_access_time);
		}
		nfsm_chain_get_time(error, nmc, NFS_VER2,
		    vap->va_modify_time.tv_sec,
		    vap->va_modify_time.tv_nsec);
		if (vap->va_modify_time.tv_sec != -1) {
			VATTR_SET_ACTIVE(vap, va_modify_time);
		}
		return error;
	}

	/* NFSv3 */
	nfsm_chain_get_32(error, nmc, val);
	if (val) {
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_mode, val & 07777);
	}
	nfsm_chain_get_32(error, nmc, val);
	if (val) {
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_uid, val);
	}
	nfsm_chain_get_32(error, nmc, val);
	if (val) {
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_gid, val);
	}
	nfsm_chain_get_32(error, nmc, val);
	if (val) {
		nfsm_chain_get_64(error, nmc, val64);
		VATTR_SET(vap, va_data_size, val64);
	}
	nanotime(&now);
	nfsm_chain_get_32(error, nmc, val);
	switch (val) {
	case NFS_TIME_SET_TO_CLIENT:
		nfsm_chain_get_time(error, nmc, nd->nd_vers,
		    vap->va_access_time.tv_sec,
		    vap->va_access_time.tv_nsec);
		VATTR_SET_ACTIVE(vap, va_access_time);
		vap->va_vaflags &= ~VA_UTIMES_NULL;
		break;
	case NFS_TIME_SET_TO_SERVER:
		VATTR_SET(vap, va_access_time, now);
		vap->va_vaflags |= VA_UTIMES_NULL;
		break;
	}
	nfsm_chain_get_32(error, nmc, val);
	switch (val) {
	case NFS_TIME_SET_TO_CLIENT:
		nfsm_chain_get_time(error, nmc, nd->nd_vers,
		    vap->va_modify_time.tv_sec,
		    vap->va_modify_time.tv_nsec);
		VATTR_SET_ACTIVE(vap, va_modify_time);
		vap->va_vaflags &= ~VA_UTIMES_NULL;
		break;
	case NFS_TIME_SET_TO_SERVER:
		VATTR_SET(vap, va_modify_time, now);
		if (!VATTR_IS_ACTIVE(vap, va_access_time)) {
			vap->va_vaflags |= VA_UTIMES_NULL;
		}
		break;
	}

	return error;
}
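/*
 * NFSv2 sattr convention: a field of all ones (-1 / 0xffffffff) means
 * "do not change this attribute", which is why the v2 branch above only
 * marks a field active when the decoded value is not -1.  NFSv3 instead
 * sends an explicit "value follows" discriminant per attribute plus a
 * set-to-client/set-to-server selector for the timestamps.
 */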
/*
 * Compare two security flavor structs
 */
int
nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
{
	int i;

	if (sf1->count != sf2->count) {
		return 1;
	}
	for (i = 0; i < sf1->count; i++) {
		if (sf1->flavors[i] != sf2->flavors[i]) {
			return 1;
		}
	}
	return 0;
}
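/*
 * Return convention: 0 when the two flavor lists are identical (same count,
 * same flavors in the same order) and nonzero otherwise; hence the
 * !nfsrv_cmp_secflavs(...) test below when checking whether an existing
 * netopt entry duplicates the one being added.
 */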
/*
 * Build hash lists of net addresses and hang them off the NFS export.
 * Called by nfsrv_export() to set up the lists of export addresses.
 */
static int
nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
	struct nfs_export_net_args nxna;
	struct nfs_netopt *no, *rn_no;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask;
	struct domain *dom;
	int i, error;
	unsigned int net;
	user_addr_t uaddr;
	kauth_cred_t cred;

	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));
		if (error) {
			return error;
		}

		if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) ||
		    nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) ||
		    nxna.nxna_addr.ss_family > AF_MAX ||
		    nxna.nxna_mask.ss_family > AF_MAX) {
			return EINVAL;
		}

		if (nxna.nxna_flags & (NX_MAPROOT | NX_MAPALL)) {
			struct posix_cred temp_pcred;
			bzero(&temp_pcred, sizeof(temp_pcred));
			temp_pcred.cr_uid = nxna.nxna_cred.cr_uid;
			temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
			for (i = 0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) {
				temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
			}
			cred = posix_cred_create(&temp_pcred);
			if (!IS_VALID_CRED(cred)) {
				return ENOMEM;
			}
		} else {
			cred = NOCRED;
		}

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				if (IS_VALID_CRED(cred)) {
					kauth_cred_unref(&cred);
				}
				return EEXIST;
			}
			nx->nx_flags |= NX_DEFAULTEXPORT;
			nx->nx_defopt.nxo_flags = nxna.nxna_flags;
			nx->nx_defopt.nxo_cred = cred;
			bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));
			nx->nx_expcnt++;
			continue;
		}

		i = sizeof(struct nfs_netopt);
		i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
		MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
		if (!no) {
			if (IS_VALID_CRED(cred)) {
				kauth_cred_unref(&cred);
			}
			return ENOMEM;
		}
		bzero(no, sizeof(struct nfs_netopt));
		no->no_opt.nxo_flags = nxna.nxna_flags;
		no->no_opt.nxo_cred = cred;
		bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));

		saddr = (struct sockaddr *)(no + 1);
		bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
		if (nxna.nxna_mask.ss_len) {
			smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
			bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);
		} else {
			smask = NULL;
		}

		i = saddr->sa_family;
		if ((rnh = nx->nx_rtable[i]) == 0) {
			/*
			 * Seems silly to initialize every AF when most are not
			 * used, do so on demand here
			 */
			TAILQ_FOREACH(dom, &domains, dom_entry) {
				if (dom->dom_family == i && dom->dom_rtattach) {
					dom->dom_rtattach((void **)&nx->nx_rtable[i],
					    dom->dom_rtoffset);
					break;
				}
			}
			if ((rnh = nx->nx_rtable[i]) == 0) {
				if (IS_VALID_CRED(cred)) {
					kauth_cred_unref(&cred);
				}
				_FREE(no, M_NETADDR);
				return ENOBUFS;
			}
		}
		rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
		if (rn == 0) {
			/*
			 * One of the reasons that rnh_addaddr may fail is that
			 * the entry already exists. To check for this case, we
			 * look up the entry to see if it is there. If so, we
			 * do not need to make a new entry but do continue.
			 *
			 * XXX should this be rnh_lookup() instead?
			 */
			int matched = 0;
			rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
			rn_no = (struct nfs_netopt *)rn;
			if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
			    (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
			    (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
				kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
				if (cred == cred2) {
					/* creds are same (or both NULL) */
					matched = 1;
				} else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) {
					/*
					 * Now compare the effective and
					 * supplementary groups...
					 *
					 * Note: This comparison, as written,
					 * does not correctly indicate that
					 * the groups are equivalent, since
					 * other than the first supplementary
					 * group, which is also the effective
					 * group, order on the remaining groups
					 * doesn't matter, and this is an
					 * ordered compare.
					 */
					gid_t groups[NGROUPS];
					gid_t groups2[NGROUPS];
					int groupcount = NGROUPS;
					int group2count = NGROUPS;

					if (!kauth_cred_getgroups(cred, groups, &groupcount) &&
					    !kauth_cred_getgroups(cred2, groups2, &group2count) &&
					    groupcount == group2count) {
						for (i = 0; i < group2count; i++) {
							if (groups[i] != groups2[i]) {
								break;
							}
						}
						if (i >= group2count || i >= NGROUPS) {
							matched = 1;
						}
					}
				}
			}
			if (IS_VALID_CRED(cred)) {
				kauth_cred_unref(&cred);
			}
			_FREE(no, M_NETADDR);
			if (!matched) {
				return EPERM;
			}
			continue;
		}
		nx->nx_expcnt++;
	}

	return 0;
}
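/*
 * As noted above, the duplicate-entry check compares supplementary groups
 * positionally, so two credentials carrying the same set of groups in a
 * different order are treated as different.  A set-wise check would have to
 * canonicalize both lists first, roughly (sketch only, not what the code
 * above does):
 *
 *	sort(groups, groupcount); sort(groups2, group2count);
 *	same = (groupcount == group2count) &&
 *	    !bcmp(groups, groups2, groupcount * sizeof(gid_t));
 *
 * The ordered compare errs toward treating the credentials as different,
 * which at worst refuses to treat the new entry as a duplicate of the
 * existing one, the safe direction.
 */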
/*
 * In order to properly track an export's netopt count, we need to pass
 * an additional argument to nfsrv_free_netopt() so that it can decrement
 * the export's netopt count.
 */
struct nfsrv_free_netopt_arg {
	uint32_t *cnt;
	struct radix_node_head *rnh;
};

int
nfsrv_free_netopt(struct radix_node *rn, void *w)
{
	struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
	struct radix_node_head *rnh = fna->rnh;
	uint32_t *cnt = fna->cnt;
	struct nfs_netopt *nno = (struct nfs_netopt *)rn;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
		kauth_cred_unref(&nno->no_opt.nxo_cred);
	}
	_FREE((caddr_t)rn, M_NETADDR);
	*cnt -= 1;

	return 0;
}
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static int
nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
	struct nfs_export_net_args nxna;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct nfsrv_free_netopt_arg fna;
	struct nfs_netopt *nno;
	user_addr_t uaddr;
	unsigned int net;
	int i, error;

	if (!unxa || !unxa->nxa_netcount) {
		/* delete everything */
		for (i = 0; i <= AF_MAX; i++) {
			if ((rnh = nx->nx_rtable[i])) {
				fna.rnh = rnh;
				fna.cnt = &nx->nx_expcnt;
				(*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
				_FREE((caddr_t)rnh, M_RTABLE);
				nx->nx_rtable[i] = 0;
			}
		}
		return 0;
	}

	/* delete only the exports specified */
	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));
		if (error) {
			return error;
		}

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
				nx->nx_expcnt--;
			}
			continue;
		}

		if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
			/* AF not initialized? */
			if (!(unxa->nxa_flags & NXA_ADD)) {
				printf("nfsrv_free_addrlist: address not found (0)\n");
			}
			continue;
		}

		rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
		    nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
		if (!rn || (rn->rn_flags & RNF_ROOT)) {
			if (!(unxa->nxa_flags & NXA_ADD)) {
				printf("nfsrv_free_addrlist: address not found (1)\n");
			}
			continue;
		}

		(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
		nno = (struct nfs_netopt *)rn;
		if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
			kauth_cred_unref(&nno->no_opt.nxo_cred);
		}
		_FREE((caddr_t)rn, M_NETADDR);
		nx->nx_expcnt--;

		if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
			/* no more entries in rnh, so free it up */
			_FREE((caddr_t)rnh, M_RTABLE);
			nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
		}
	}

	return 0;
}
void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX
int
nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx)
{
	int error = 0;
	size_t pathlen;
	struct nfs_exportfs *nxfs, *nxfs2, *nxfs3;
	struct nfs_export *nx, *nx2, *nx3;
	struct nfs_filehandle nfh;
	struct nameidata mnd, xnd;
	vnode_t mvp = NULL, xvp = NULL;
	mount_t mp = NULL;
	char path[MAXPATHLEN];
	int expisroot;

	if (unxa->nxa_flags == NXA_CHECK) {
		/* just check if the path is an NFS-exportable file system */
		error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
		if (error) {
			return error;
		}
		NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
		error = namei(&mnd);
		if (error) {
			return error;
		}
		mvp = mnd.ni_vp;
		mp = vnode_mount(mvp);
		/* make sure it's the root of a file system */
		if (!vnode_isvroot(mvp)) {
			error = EINVAL;
		}
		/* make sure the file system is NFS-exportable */
		if (!error) {
			nfh.nfh_len = NFSV3_MAX_FID_SIZE;
			error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
		}
		if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
			error = EIO;
		}
		if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
			error = EISDIR;
		}
		vnode_put(mvp);
		nameidone(&mnd);
		return error;
	}

	/* all other operations: must be super user */
	if ((error = vfs_context_suser(ctx))) {
		return error;
	}

	if (unxa->nxa_flags & NXA_DELETE_ALL) {
		/* delete all exports on all file systems */
		lck_rw_lock_exclusive(&nfsrv_export_rwlock);
		while ((nxfs = LIST_FIRST(&nfsrv_exports))) {
			mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
			if (mp) {
				vfs_clearflags(mp, MNT_EXPORTED);
			}
			/* delete all exports on this file system */
			while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
				LIST_REMOVE(nx, nx_next);
				LIST_REMOVE(nx, nx_hash);
				/* delete all netopts for this export */
				nfsrv_free_addrlist(nx, NULL);
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
				/* free active user list for this export */
				nfsrv_free_user_list(&nx->nx_user_list);
				FREE(nx->nx_path, M_TEMP);
				FREE(nx, M_TEMP);
			}
			LIST_REMOVE(nxfs, nxfs_next);
			FREE(nxfs->nxfs_path, M_TEMP);
			FREE(nxfs, M_TEMP);
		}
		if (nfsrv_export_hashtbl) {
			/* all exports deleted, clean up export hash table */
			FREE(nfsrv_export_hashtbl, M_TEMP);
			nfsrv_export_hashtbl = NULL;
		}
		lck_rw_done(&nfsrv_export_rwlock);
		return 0;
	}

	error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
	if (error) {
		return error;
	}

	lck_rw_lock_exclusive(&nfsrv_export_rwlock);

	/* init export hash table if not already */
	if (!nfsrv_export_hashtbl) {
		if (nfsrv_export_hash_size <= 0) {
			nfsrv_export_hash_size = NFSRVEXPHASHSZ;
		}
		nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash);
	}

	// first check if we've already got an exportfs with the given ID
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		if (nxfs->nxfs_id == unxa->nxa_fsid) {
			break;
		}
	}
	if (nxfs) {
		/* verify exported FS path matches given path */
		if (strncmp(path, nxfs->nxfs_path, MAXPATHLEN)) {
			error = EEXIST;
			goto out;
		}
		if ((unxa->nxa_flags & (NXA_ADD | NXA_OFFLINE)) == NXA_ADD) {
			/* if adding, verify that the mount is still what we expect */
			mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
			/* find exported FS root vnode */
			NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
			    UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx);
			error = namei(&mnd);
			if (error) {
				goto out;
			}
			mvp = mnd.ni_vp;
			/* make sure it's (still) the root of a file system */
			if (!vnode_isvroot(mvp)) {
				error = EINVAL;
				goto out;
			}
			/* sanity check: this should be same mount */
			if (mp != vnode_mount(mvp)) {
				error = EINVAL;
				goto out;
			}
		}
	} else {
		/* no current exported file system with that ID */
		if (!(unxa->nxa_flags & NXA_ADD)) {
			error = ENOENT;
			goto out;
		}

		/* find exported FS root vnode */
		NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
		error = namei(&mnd);
		if (error) {
			if (!(unxa->nxa_flags & NXA_OFFLINE)) {
				goto out;
			}
			error = 0;
		} else {
			mvp = mnd.ni_vp;
			/* make sure it's the root of a file system */
			if (!vnode_isvroot(mvp)) {
				/* bail if not marked offline */
				if (!(unxa->nxa_flags & NXA_OFFLINE)) {
					error = EINVAL;
					goto out;
				}
				vnode_put(mvp);
				nameidone(&mnd);
				mvp = NULL;
			} else {
				mp = vnode_mount(mvp);

				/* make sure the file system is NFS-exportable */
				nfh.nfh_len = NFSV3_MAX_FID_SIZE;
				error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
				if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
					error = EIO;
				}
				if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
					error = EISDIR;
				}
				if (error) {
					goto out;
				}
			}
		}

		/* add an exportfs for it */
		MALLOC(nxfs, struct nfs_exportfs *, sizeof(struct nfs_exportfs), M_TEMP, M_WAITOK);
		if (!nxfs) {
			error = ENOMEM;
			goto out;
		}
		bzero(nxfs, sizeof(struct nfs_exportfs));
		nxfs->nxfs_id = unxa->nxa_fsid;
		MALLOC(nxfs->nxfs_path, char*, pathlen, M_TEMP, M_WAITOK);
		if (!nxfs->nxfs_path) {
			FREE(nxfs, M_TEMP);
			error = ENOMEM;
			goto out;
		}
		bcopy(path, nxfs->nxfs_path, pathlen);
		/* insert into list in reverse-sorted order */
		nxfs3 = NULL;
		LIST_FOREACH(nxfs2, &nfsrv_exports, nxfs_next) {
			if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) {
				break;
			}
			nxfs3 = nxfs2;
		}
		if (nxfs2) {
			LIST_INSERT_BEFORE(nxfs2, nxfs, nxfs_next);
		} else if (nxfs3) {
			LIST_INSERT_AFTER(nxfs3, nxfs, nxfs_next);
		} else {
			LIST_INSERT_HEAD(&nfsrv_exports, nxfs, nxfs_next);
		}

		/* make sure any quotas are enabled before we export the file system */
		if (mp) {
			enablequotas(mp, ctx);
		}
	}

	if (unxa->nxa_exppath) {
		error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, &pathlen);
		if (error) {
			goto out;
		}
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			if (nx->nx_id == unxa->nxa_expid) {
				break;
			}
		}
		if (nx) {
			/* verify exported FS path matches given path */
			if (strncmp(path, nx->nx_path, MAXPATHLEN)) {
				error = EEXIST;
				goto out;
			}
		} else {
			/* no current export with that ID */
			if (!(unxa->nxa_flags & NXA_ADD)) {
				error = ENOENT;
				goto out;
			}

			/* add an export for it */
			MALLOC(nx, struct nfs_export *, sizeof(struct nfs_export), M_TEMP, M_WAITOK);
			if (!nx) {
				error = ENOMEM;
				goto out;
			}
			bzero(nx, sizeof(struct nfs_export));
			nx->nx_id = unxa->nxa_expid;
			nx->nx_fs = nxfs;
			microtime(&nx->nx_exptime);
			MALLOC(nx->nx_path, char*, pathlen, M_TEMP, M_WAITOK);
			if (!nx->nx_path) {
				FREE(nx, M_TEMP);
				nx = NULL;
				error = ENOMEM;
				goto out;
			}
			bcopy(path, nx->nx_path, pathlen);
			/* initialize the active user list */
			nfsrv_init_user_list(&nx->nx_user_list);
			/* insert into list in reverse-sorted order */
			nx3 = NULL;
			LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
				if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) {
					break;
				}
				nx3 = nx2;
			}
			if (nx2) {
				LIST_INSERT_BEFORE(nx2, nx, nx_next);
			} else if (nx3) {
				LIST_INSERT_AFTER(nx3, nx, nx_next);
			} else {
				LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next);
			}
			/* insert into hash */
			LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash);

			/*
			 * We don't allow/support nested exports.  Check if the new entry
			 * nests with the entries before and after or if there's an
			 * entry for the file system root and subdirs.
			 */
			if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) &&
			    (nx3->nx_path[pathlen - 1] == '/')) ||
			    (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) &&
			    (nx->nx_path[strlen(nx2->nx_path)] == '/'))) {
				error = EINVAL;
			} else {
				/* check export conflict with fs root export and vice versa */
				expisroot = !nx->nx_path[0] ||
				    ((nx->nx_path[0] == '.') && !nx->nx_path[1]);
				LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
					if (expisroot) {
						if (nx2 != nx) {
							break;
						}
					} else if (!nx2->nx_path[0]) {
						break;
					} else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) {
						break;
					}
				}
				if (nx2 && (nx2 != nx)) {
					error = EINVAL;
				}
			}
			if (error) {
				/*
				 * Don't actually return an error because mountd is
				 * probably about to delete the conflicting export.
				 * This can happen when a new export momentarily conflicts
				 * with an old export while the transition is being made.
				 * Theoretically, mountd could be written to avoid this
				 * transient situation - but it would greatly increase the
				 * complexity of mountd for very little overall benefit.
				 */
				printf("nfsrv_export: warning: nested exports: %s/%s\n",
				    nxfs->nxfs_path, nx->nx_path);
				error = 0;
			}
			nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
		}
		/* make sure file handle is set up */
		if ((nx->nx_fh.nfh_xh.nxh_version != htonl(NFS_FH_VERSION)) ||
		    (nx->nx_fh.nfh_xh.nxh_flags & NXHF_INVALIDFH)) {
			/* try to set up export root file handle */
			nx->nx_fh.nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
			nx->nx_fh.nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
			nx->nx_fh.nfh_xh.nxh_expid = htonl(nx->nx_id);
			nx->nx_fh.nfh_xh.nxh_flags = 0;
			nx->nx_fh.nfh_xh.nxh_reserved = 0;
			nx->nx_fh.nfh_fhp = (u_char *)&nx->nx_fh.nfh_xh;
			bzero(&nx->nx_fh.nfh_fid[0], NFSV2_MAX_FID_SIZE);
			if (mvp) {
				/* find export root vnode */
				if (!nx->nx_path[0] || ((nx->nx_path[0] == '.') && !nx->nx_path[1])) {
					/* exporting file system's root directory */
					xvp = mvp;
					vnode_get(xvp);
				} else {
					xnd.ni_cnd.cn_nameiop = LOOKUP;
					xnd.ni_op = OP_LOOKUP;
					xnd.ni_cnd.cn_flags = LOCKLEAF;
					xnd.ni_pathlen = pathlen - 1;
					xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path;
					xnd.ni_startdir = mvp;
					xnd.ni_usedvp = mvp;
					xnd.ni_rootdir = rootvnode;
					xnd.ni_cnd.cn_context = ctx;
					while ((error = lookup(&xnd)) == ERECYCLE) {
						xnd.ni_cnd.cn_flags = LOCKLEAF;
						xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf;
						xnd.ni_usedvp = xnd.ni_dvp = xnd.ni_startdir = mvp;
					}
					if (error) {
						goto out;
					}
					xvp = xnd.ni_vp;
				}

				if (vnode_vtype(xvp) != VDIR) {
					error = EINVAL;
					vnode_put(xvp);
					goto out;
				}

				/* grab file handle */
				nx->nx_fh.nfh_len = NFSV3_MAX_FID_SIZE;
				error = VFS_VPTOFH(xvp, (int*)&nx->nx_fh.nfh_len, &nx->nx_fh.nfh_fid[0], NULL);
				if (!error && (nx->nx_fh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
					error = EIO;
				} else {
					nx->nx_fh.nfh_xh.nxh_fidlen = nx->nx_fh.nfh_len;
					nx->nx_fh.nfh_len += sizeof(nx->nx_fh.nfh_xh);
				}
				vnode_put(xvp);
				if (error) {
					goto out;
				}
			} else {
				nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
				nx->nx_fh.nfh_xh.nxh_fidlen = 0;
				nx->nx_fh.nfh_len = sizeof(nx->nx_fh.nfh_xh);
			}
		}
	} else {
		nx = NULL;
	}

	/* perform the export changes */
	if (unxa->nxa_flags & NXA_DELETE) {
		if (!nx) {
			/* delete all exports on this file system */
			while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
				LIST_REMOVE(nx, nx_next);
				LIST_REMOVE(nx, nx_hash);
				/* delete all netopts for this export */
				nfsrv_free_addrlist(nx, NULL);
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
				/* delete active user list for this export */
				nfsrv_free_user_list(&nx->nx_user_list);
				FREE(nx->nx_path, M_TEMP);
				FREE(nx, M_TEMP);
			}
		} else if (!unxa->nxa_netcount) {
			/* delete all netopts for this export */
			nfsrv_free_addrlist(nx, NULL);
			nx->nx_flags &= ~NX_DEFAULTEXPORT;
			if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
				kauth_cred_unref(&nx->nx_defopt.nxo_cred);
			}
		} else {
			/* delete only the netopts for the given addresses */
			error = nfsrv_free_addrlist(nx, unxa);
			if (error) {
				goto out;
			}
		}
	}
	if (unxa->nxa_flags & NXA_ADD) {
		/*
		 * If going offline set the export time so that when
		 * coming back on line we will present a new write verifier
		 */
		if (unxa->nxa_flags & NXA_OFFLINE) {
			microtime(&nx->nx_exptime);
		}

		error = nfsrv_hang_addrlist(nx, unxa);
		if (!error && mp) {
			vfs_setflags(mp, MNT_EXPORTED);
		}
	}

	if (nx && !nx->nx_expcnt) {
		/* export has no export options */
		LIST_REMOVE(nx, nx_next);
		LIST_REMOVE(nx, nx_hash);
		/* delete active user list for this export */
		nfsrv_free_user_list(&nx->nx_user_list);
		FREE(nx->nx_path, M_TEMP);
		FREE(nx, M_TEMP);
	}
	if (LIST_EMPTY(&nxfs->nxfs_exports)) {
		/* exported file system has no more exports */
		LIST_REMOVE(nxfs, nxfs_next);
		FREE(nxfs->nxfs_path, M_TEMP);
		FREE(nxfs, M_TEMP);
		if (mp) {
			vfs_clearflags(mp, MNT_EXPORTED);
		}
	}

out:
	if (mvp) {
		vnode_put(mvp);
		nameidone(&mnd);
	}
	lck_rw_done(&nfsrv_export_rwlock);
	return error;
}
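/*
 * Summary of the nxa_flags handling above: NXA_CHECK only verifies that the
 * given path is the root of an NFS-exportable file system; NXA_DELETE_ALL
 * tears down every export on every file system; otherwise NXA_DELETE removes
 * exports (or just their netopts) and NXA_ADD installs new address lists,
 * with NXA_OFFLINE relaxing the mount/root checks and refreshing the export
 * time for file systems that are not currently available.
 */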
/*
 * Check if there is at least one export that will allow this address.
 *
 * Return 0, if there is an export that will allow this address,
 * else return EACCES
 */
int
nfsrv_check_exports_allow_address(mbuf_t nam)
{
	struct nfs_exportfs *nxfs;
	struct nfs_export *nx;
	struct nfs_export_options *nxo = NULL;

	lck_rw_lock_shared(&nfsrv_export_rwlock);
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			/* A little optimizing by checking for the default first */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				nxo = &nx->nx_defopt;
			}
			if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) {
				goto found;
			}
		}
	}
found:
	lck_rw_done(&nfsrv_export_rwlock);

	return nxo ? 0 : EACCES;
}
struct nfs_export_options *
nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam)
{
	struct nfs_export_options *nxo = NULL;
	struct nfs_netopt *no = NULL;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	/* Lookup in the export list first. */
	if (nam != NULL) {
		saddr = mbuf_data(nam);
		if (saddr->sa_family > AF_MAX) {
			/* Bogus sockaddr?  Don't match anything. */
			return NULL;
		}
		rnh = nx->nx_rtable[saddr->sa_family];
		if (rnh != NULL) {
			no = (struct nfs_netopt *)
			    (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
			if (no && no->no_rnodes->rn_flags & RNF_ROOT) {
				no = NULL;
			}
			if (no) {
				nxo = &no->no_opt;
			}
		}
	}
	/* If no address match, use the default if it exists. */
	if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) {
		nxo = &nx->nx_defopt;
	}
	return nxo;
}
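/*
 * The RNF_ROOT test above filters out the radix tree's internal end-marker
 * nodes, which rnh_matchaddr can hand back; they are not real nfs_netopt
 * entries and must never be treated as an address match.
 */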
/* find an export for the given handle */
struct nfs_export *
nfsrv_fhtoexport(struct nfs_filehandle *nfhp)
{
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export *nx;
	uint32_t fsid, expid;

	if (!nfsrv_export_hashtbl) {
		return NULL;
	}
	fsid = ntohl(nxh->nxh_fsid);
	expid = ntohl(nxh->nxh_expid);
	nx = NFSRVEXPHASH(fsid, expid)->lh_first;
	for (; nx; nx = LIST_NEXT(nx, nx_hash)) {
		if (nx->nx_fs->nxfs_id != fsid) {
			continue;
		}
		if (nx->nx_id != expid) {
			continue;
		}
		break;
	}
	return nx;
}
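/*
 * Because the exported file handle embeds the (fsid, expid) pair, mapping a
 * handle back to its export is a plain hash lookup here; no path string
 * comparison or file system traversal is needed on this path.
 */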
struct nfsrv_getvfs_by_mntonname_callback_args {
	const char *path; /* IN */
	mount_t mp;       /* OUT */
};

static int
nfsrv_getvfs_by_mntonname_callback(mount_t mp, void *v)
{
	struct nfsrv_getvfs_by_mntonname_callback_args * const args = v;
	char real_mntonname[MAXPATHLEN];
	int pathbuflen = MAXPATHLEN;
	vnode_t rvp;
	int error;

	error = VFS_ROOT(mp, &rvp, vfs_context_current());
	if (error) {
		goto out;
	}
	error = vn_getpath_ext(rvp, NULLVP, real_mntonname, &pathbuflen,
	    VN_GETPATH_FSENTER | VN_GETPATH_NO_FIRMLINK);
	vnode_put(rvp);
	if (error) {
		goto out;
	}
	if (strcmp(args->path, real_mntonname) == 0) {
		error = vfs_busy(mp, LK_NOWAIT);
		if (error == 0) {
			args->mp = mp;
		}
		return VFS_RETURNED_DONE;
	}
out:
	return VFS_RETURNED;
}

static mount_t
nfsrv_getvfs_by_mntonname(char *path)
{
	struct nfsrv_getvfs_by_mntonname_callback_args args = {
		.path = path,
	};
	mount_t mp;
	int error;

	mp = vfs_getvfs_by_mntonname(path);
	if (mp) {
		error = vfs_busy(mp, LK_NOWAIT);
		mount_iterdrop(mp);
		if (error) {
			mp = NULL;
		}
	} else if (vfs_iterate(0, nfsrv_getvfs_by_mntonname_callback,
	    &args) == 0) {
		mp = args.mp;
	}
	return mp;
}
/*
 * nfsrv_fhtovp() - convert FH to vnode and export info
 */
int
nfsrv_fhtovp(
	struct nfs_filehandle *nfhp,
	struct nfsrv_descript *nd,
	vnode_t *vpp,
	struct nfs_export **nxp,
	struct nfs_export_options **nxop)
{
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export_options *nxo;
	u_char *fidp;
	mbuf_t nam = NULL;
	uint32_t v;
	int i, valid;
	int error;
	mount_t mp;

	*vpp = NULL;
	*nxp = NULL;
	*nxop = NULL;

	if (nd != NULL) {
		nam = nd->nd_nam;
	}

	v = ntohl(nxh->nxh_version);
	if (v != NFS_FH_VERSION) {
		/* file handle format not supported */
		return ESTALE;
	}
	if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) {
		return EBADRPC;
	}
	if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) {
		return ESTALE;
	}
	v = ntohs(nxh->nxh_flags);
	if (v & NXHF_INVALIDFH) {
		return ESTALE;
	}

	*nxp = nfsrv_fhtoexport(nfhp);
	if (!*nxp) {
		return ESTALE;
	}

	/* Get the export option structure for this <export, client> tuple. */
	*nxop = nxo = nfsrv_export_lookup(*nxp, nam);
	if (nam && (*nxop == NULL)) {
		return EACCES;
	}

	if (nd != NULL) {
		/* Validate the security flavor of the request */
		for (i = 0, valid = 0; i < nxo->nxo_sec.count; i++) {
			if (nd->nd_sec == nxo->nxo_sec.flavors[i]) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			/*
			 * RFC 2623 section 2.3.2 recommends no authentication
			 * requirement for certain NFS procedures used for mounting.
			 * This allows an unauthenticated superuser on the client
			 * to do mounts for the benefit of authenticated users.
			 */
			if (nd->nd_vers == NFS_VER2) {
				if (nd->nd_procnum == NFSV2PROC_GETATTR ||
				    nd->nd_procnum == NFSV2PROC_STATFS) {
					valid = 1;
				}
			}
			if (nd->nd_vers == NFS_VER3) {
				if (nd->nd_procnum == NFSPROC_FSINFO) {
					valid = 1;
				}
			}
			if (!valid) {
				return NFSERR_AUTHERR | AUTH_REJECTCRED;
			}
		}
	}

	if (nxo && (nxo->nxo_flags & NX_OFFLINE)) {
		return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
	}

	/* find mount structure */
	mp = nfsrv_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path);
	if (!mp) {
		/*
		 * We have an export, but no mount?
		 * Perhaps the export just hasn't been marked offline yet.
		 */
		return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
	}

	fidp = nfhp->nfh_fhp + sizeof(*nxh);
	error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL);
	vfs_unbusy(mp);
	if (error) {
		return error;
	}
	/* vnode pointer should be good at this point or ... */
	if (*vpp == NULL) {
		return ESTALE;
	}
	return 0;
}
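/*
 * Offline handling: NFSv3 clients get NFSERR_TRYLATER (a.k.a. JUKEBOX),
 * which asks them to retry later, while NFSv2 has no such error code, so an
 * offline export can only be reported as ESTALE to v2 clients.
 */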
/*
 * nfsrv_credcheck() - check/map credentials according
 * to given export options.
 */
int
nfsrv_credcheck(
	struct nfsrv_descript *nd,
	vfs_context_t ctx,
	__unused struct nfs_export *nx,
	struct nfs_export_options *nxo)
{
	if (nxo && nxo->nxo_cred) {
		if ((nxo->nxo_flags & NX_MAPALL) ||
		    ((nxo->nxo_flags & NX_MAPROOT) && !suser(nd->nd_cr, NULL))) {
			kauth_cred_ref(nxo->nxo_cred);
			kauth_cred_unref(&nd->nd_cr);
			nd->nd_cr = nxo->nxo_cred;
		}
	}
	ctx->vc_ucred = nd->nd_cr;
	return 0;
}
/*
 * nfsrv_vptofh() - convert vnode to file handle for given export
 *
 * If the caller is passing in a vnode for a ".." directory entry,
 * they can pass a directory NFS file handle (dnfhp) which will be
 * checked against the root export file handle.  If it matches, we
 * refuse to provide the file handle for the out-of-export directory.
 */
int
nfsrv_vptofh(
	struct nfs_export *nx,
	int nfsvers,
	struct nfs_filehandle *dnfhp,
	vnode_t vp,
	vfs_context_t ctx,
	struct nfs_filehandle *nfhp)
{
	int error;
	uint32_t maxfidsize;

	nfhp->nfh_fhp = (u_char *)&nfhp->nfh_xh;
	nfhp->nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
	nfhp->nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
	nfhp->nfh_xh.nxh_expid = htonl(nx->nx_id);
	nfhp->nfh_xh.nxh_flags = 0;
	nfhp->nfh_xh.nxh_reserved = 0;

	if (nfsvers == NFS_VER2) {
		bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE);
	}

	/* if directory FH matches export root, return invalid FH */
	if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) {
		if (nfsvers == NFS_VER2) {
			nfhp->nfh_len = NFSX_V2FH;
		} else {
			nfhp->nfh_len = sizeof(nfhp->nfh_xh);
		}
		nfhp->nfh_xh.nxh_fidlen = 0;
		nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH);
		return 0;
	}

	if (nfsvers == NFS_VER2) {
		maxfidsize = NFSV2_MAX_FID_SIZE;
	} else {
		maxfidsize = NFSV3_MAX_FID_SIZE;
	}
	nfhp->nfh_len = maxfidsize;

	error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx);
	if (error) {
		return error;
	}
	if (nfhp->nfh_len > maxfidsize) {
		return EOVERFLOW;
	}
	nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len;
	nfhp->nfh_len += sizeof(nfhp->nfh_xh);
	if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) {
		nfhp->nfh_len = NFSX_V2FH;
	}

	return 0;
}
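/*
 * The final NFSX_V2FH padding matters because NFSv2 file handles are a fixed
 * 32 bytes on the wire; the fid area was zeroed up front, so shorter handles
 * simply go out zero-padded to that size.
 */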
/*
 * Compare two file handles to see if they're the same.
 * Note that we don't use nfh_len because that may include
 * padding in an NFSv2 file handle.
 */
int
nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2)
{
	struct nfs_exphandle *nxh1, *nxh2;
	int len1, len2;

	nxh1 = (struct nfs_exphandle *)fh1->nfh_fhp;
	nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp;
	len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen;
	len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen;
	if (len1 != len2) {
		return 0;
	}
	if (bcmp(nxh1, nxh2, len1)) {
		return 0;
	}
	return 1;
}
/*
 * Functions for dealing with active user lists
 */

/*
 * Search the hash table for a user node with a matching IP address and uid field.
 * If found, the node's tm_last timestamp is updated and the node is returned.
 *
 * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned.
 * Returns NULL if a new node could not be allocated.
 *
 * The list's user_mutex lock MUST be held.
 */
struct nfs_user_stat_node *
nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *saddr, uid_t uid)
{
	struct nfs_user_stat_node *unode;
	struct timeval now;
	struct nfs_user_stat_hashtbl_head *head;

	/* search the hash table */
	head = NFS_USER_STAT_HASH(list->user_hashtbl, uid);
	LIST_FOREACH(unode, head, hash_link) {
		if ((uid == unode->uid) && (nfs_sockaddr_cmp(saddr, (struct sockaddr *)&unode->sock) == 0)) {
			/* found matching node */
			break;
		}
	}

	if (unode) {
		/* found node in the hash table, now update lru position */
		TAILQ_REMOVE(&list->user_lru, unode, lru_link);
		TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);

		/* update time stamp */
		microtime(&now);
		unode->tm_last = (uint32_t)now.tv_sec;
		return unode;
	}

	if (list->node_count < nfsrv_user_stat_max_nodes) {
		/* Allocate a new node */
		MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node),
		    M_TEMP, M_WAITOK | M_ZERO);
		if (!unode) {
			return NULL;
		}
		/* increment node count */
		OSAddAtomic(1, &nfsrv_user_stat_node_count);
		list->node_count++;
	} else {
		/* reuse the oldest node in the lru list */
		unode = TAILQ_FIRST(&list->user_lru);
		if (!unode) {
			return NULL;
		}
		/* Remove the node */
		TAILQ_REMOVE(&list->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);
	}

	/* Initialize the node */
	unode->uid = uid;
	bcopy(saddr, &unode->sock, saddr->sa_len);
	microtime(&now);
	unode->ops = 0;
	unode->bytes_read = 0;
	unode->bytes_written = 0;
	unode->tm_start = (uint32_t)now.tv_sec;
	unode->tm_last = (uint32_t)now.tv_sec;

	/* insert the node */
	TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
	LIST_INSERT_HEAD(head, unode, hash_link);

	return unode;
}
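/*
 * The per-export table is bounded by nfsrv_user_stat_max_nodes: once full,
 * the least recently used node (the head of user_lru, since fresh and
 * updated nodes are re-inserted at the tail) is recycled for the new user
 * instead of allocating, so the accounting memory per export stays capped.
 */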
void
nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes)
{
	struct nfs_user_stat_node *unode;
	struct nfs_active_user_list *ulist;
	struct sockaddr *saddr;

	if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) {
		return;
	}

	saddr = (struct sockaddr *)mbuf_data(nd->nd_nam);

	/* check address family before going any further */
	if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) {
		return;
	}

	ulist = &nx->nx_user_list;

	/* lock the active user list */
	lck_mtx_lock(&ulist->user_mutex);

	/* get the user node */
	unode = nfsrv_get_user_stat_node(ulist, saddr, uid);

	if (!unode) {
		lck_mtx_unlock(&ulist->user_mutex);
		return;
	}

	/* update counters */
	unode->ops += ops;
	unode->bytes_read += rd_bytes;
	unode->bytes_written += wr_bytes;

	/* done */
	lck_mtx_unlock(&ulist->user_mutex);
}
/* initialize an active user list */
void
nfsrv_init_user_list(struct nfs_active_user_list *ulist)
{
	uint i;

	/* initialize the lru */
	TAILQ_INIT(&ulist->user_lru);

	/* initialize the hash table */
	for (i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) {
		LIST_INIT(&ulist->user_hashtbl[i]);
	}
	ulist->node_count = 0;

	lck_mtx_init(&ulist->user_mutex, nfsrv_active_user_mutex_group, LCK_ATTR_NULL);
}
/* Free all nodes in an active user list */
void
nfsrv_free_user_list(struct nfs_active_user_list *ulist)
{
	struct nfs_user_stat_node *unode;

	while ((unode = TAILQ_FIRST(&ulist->user_lru))) {
		/* Remove node and free */
		TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);

		/* decrement node count */
		OSAddAtomic(-1, &nfsrv_user_stat_node_count);
	}
	ulist->node_count = 0;

	lck_mtx_destroy(&ulist->user_mutex, nfsrv_active_user_mutex_group);
}
/* Reclaim old expired user nodes from active user lists. */
void
nfsrv_active_user_list_reclaim(void)
{
	struct nfs_exportfs *nxfs;
	struct nfs_export *nx;
	struct nfs_active_user_list *ulist;
	struct nfs_user_stat_hashtbl_head oldlist;
	struct nfs_user_stat_node *unode, *unode_next;
	struct timeval now;
	long tstale;

	LIST_INIT(&oldlist);

	lck_rw_lock_shared(&nfsrv_export_rwlock);
	microtime(&now);
	tstale = now.tv_sec - nfsrv_user_stat_max_idle_sec;
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			/* Scan through all user nodes of this export */
			ulist = &nx->nx_user_list;
			lck_mtx_lock(&ulist->user_mutex);
			for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
				unode_next = TAILQ_NEXT(unode, lru_link);

				/* check if this node has expired */
				if (unode->tm_last >= tstale) {
					break;
				}

				/* Remove node from the active user list */
				TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
				LIST_REMOVE(unode, hash_link);

				/* Add node to temp list */
				LIST_INSERT_HEAD(&oldlist, unode, hash_link);

				/* decrement node count */
				OSAddAtomic(-1, &nfsrv_user_stat_node_count);
				ulist->node_count--;
			}
			/* can unlock this export's list now */
			lck_mtx_unlock(&ulist->user_mutex);
		}
	}
	lck_rw_done(&nfsrv_export_rwlock);

	/* Free expired nodes */
	while ((unode = LIST_FIRST(&oldlist))) {
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);
	}
}
/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static u_char nfsrv_v2errmap[] = {
	NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
	NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
};
/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static short nfsv3err_null[] = {
static short nfsv3err_getattr[] = {
static short nfsv3err_setattr[] = {
static short nfsv3err_lookup[] = {
static short nfsv3err_access[] = {
static short nfsv3err_readlink[] = {
static short nfsv3err_read[] = {
static short nfsv3err_write[] = {
static short nfsv3err_create[] = {
static short nfsv3err_mkdir[] = {
static short nfsv3err_symlink[] = {
static short nfsv3err_mknod[] = {
static short nfsv3err_remove[] = {
static short nfsv3err_rmdir[] = {
static short nfsv3err_rename[] = {
static short nfsv3err_link[] = {
static short nfsv3err_readdir[] = {
static short nfsv3err_readdirplus[] = {
static short nfsv3err_fsstat[] = {
static short nfsv3err_fsinfo[] = {
static short nfsv3err_pathconf[] = {
static short nfsv3err_commit[] = {
static short *nfsrv_v3errmap[] = {
	nfsv3err_null,
	nfsv3err_getattr,
	nfsv3err_setattr,
	nfsv3err_lookup,
	nfsv3err_access,
	nfsv3err_readlink,
	nfsv3err_read,
	nfsv3err_write,
	nfsv3err_create,
	nfsv3err_mkdir,
	nfsv3err_symlink,
	nfsv3err_mknod,
	nfsv3err_remove,
	nfsv3err_rmdir,
	nfsv3err_rename,
	nfsv3err_link,
	nfsv3err_readdir,
	nfsv3err_readdirplus,
	nfsv3err_fsstat,
	nfsv3err_fsinfo,
	nfsv3err_pathconf,
	nfsv3err_commit,
};
/*
 * Map errnos to NFS error numbers. For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
int
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
	short *defaulterrp, *errp;

	if (nd->nd_vers == NFS_VER2) {
		if (err <= (int)sizeof(nfsrv_v2errmap)) {
			return (int)nfsrv_v2errmap[err - 1];
		}
		return NFSERR_IO;
	}
	/* NFSv3 */
	if (nd->nd_procnum > NFSPROC_COMMIT) {
		return err & 0xffff;
	}
	errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
	while (*++errp) {
		if (*errp == err) {
			return err;
		} else if (*errp > err) {
			break;
		}
	}
	return (int)*defaulterrp;
}
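/*
 * Usage sketch: server procedures pass the errno from the local VFS call
 * through nfsrv_errmap(nd, error) before encoding the reply status, so an
 * NFSv3 client only ever sees an error code that the tables above list for
 * the procedure it invoked (or that procedure's default error).
 */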
#endif /* NFSSERVER */