/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $
 */
#include <nfs/nfs_conf.h>

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */

#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/syscall.h>
#include <sys/ubc_internal.h>
#include <sys/fcntl.h>
#include <sys/domain.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <sys/vmparam.h>
#include <sys/time.h>
#include <kern/clock.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#if CONFIG_NFS_CLIENT
#define _NFS_XDR_SUBS_FUNCS_ /* define this to get xdrbuf function definitions */
#endif
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>

#include <miscfs/specfs/specdev.h>

#include <netinet/in.h>
#include <net/kpi_interface.h>

#include <sys/utfconv.h>
struct nfsstats __attribute__((aligned(8))) nfsstats;
size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0;
/*
 * functions to convert between NFS and VFS types
 */
nfstype
vtonfs_type(enum vtype vtype, int nfsvers)
{
    switch (vtype) {
    /* ... */
    case VSOCK:
        if (nfsvers > NFS_VER2) {
            return NFSOCK;
        }
        return NFNON;
    case VFIFO:
        if (nfsvers > NFS_VER2) {
            return NFFIFO;
        }
        return NFNON;
    default:
        return NFNON;
    }
}

enum vtype
nfstov_type(nfstype nvtype, int nfsvers)
{
    switch (nvtype) {
    /* ... */
    case NFSOCK:
        if (nfsvers > NFS_VER2) {
            return VSOCK;
        }
        return VNON;
    case NFFIFO:
        if (nfsvers > NFS_VER2) {
            return VFIFO;
        }
        return VNON;
    case NFATTRDIR:
        if (nfsvers > NFS_VER3) {
            return VDIR;
        }
        return VNON;
    case NFNAMEDATTR:
        if (nfsvers > NFS_VER3) {
            return VREG;
        }
        return VNON;
    default:
        return VNON;
    }
}

int
vtonfsv2_mode(enum vtype vtype, mode_t m)
{
    switch (vtype) {
    /* ... */
        return vnode_makeimode(vtype, m);
    case VFIFO:
        /* NFSv2 has no FIFO type, encode it as a character device */
        return vnode_makeimode(VCHR, m);
    default:
        return vnode_makeimode(VNON, m);
    }
}
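/*
 * Illustrative sketch (not part of the original source): NFSv2 has no FIFO
 * file type, so vtonfsv2_mode() encodes a FIFO's mode as if it were a
 * character device; the peer later relies on a special rdev value to tell
 * the two apart (see the "Really ugly NFSv2 kludge" in nfs_parsefattr()
 * below).  A minimal, hypothetical usage:
 */
#if 0	/* example only */
	mode_t wire_mode;

	/* a FIFO with permissions 0644 goes on the wire with VCHR type bits */
	wire_mode = vtonfsv2_mode(VFIFO, 0644);
#endif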
#if CONFIG_NFS_SERVER
/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
int nfsv3_procid[NFS_NPROCS] = {
	/* ... */
};
#endif /* CONFIG_NFS_SERVER */

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {
	/* ... */
};
/*
 * initialize NFS's cache of mbuf constants
 */
void
nfs_mbuf_init(void)
{
	struct mbuf_stat ms;

	mbuf_stats(&ms);
	nfs_mbuf_mhlen = ms.mhlen;
	nfs_mbuf_minclsize = ms.minclsize;
}
#if CONFIG_NFS_SERVER
/*
 * allocate a list of mbufs to hold the given amount of data
 */
int
nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt)
{
	mbuf_t mhead, mlast, m;
	size_t len, mlen;
	int error, cnt;

	error = 0;
	len = cnt = 0;
	mhead = mlast = NULL;

	while (len < size) {
		nfsm_mbuf_get(error, &m, (size - len));
		if (error) {
			break;
		}
		if (!mhead) {
			mhead = m;
		}
		if (mlast && ((error = mbuf_setnext(mlast, m)))) {
			mbuf_free(m);
			break;
		}
		mlen = mbuf_maxlen(m);
		if ((len + mlen) > size) {
			mlen = size - len;
		}
		mbuf_setlen(m, mlen);
		len += mlen;
		cnt++;
		mlast = m;
	}

	if (!error) {
		*mp = mhead;
		*mbcnt = cnt;
	}
	return error;
}
#endif /* CONFIG_NFS_SERVER */
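/*
 * Illustrative sketch (not in the original source): a server-side caller that
 * needs room for a reply payload might ask for an mbuf list sized to the
 * request.  The names below are hypothetical.
 */
#if 0	/* example only */
	mbuf_t mlist = NULL;
	int mcount = 0, err;

	err = nfsm_mbuf_get_list(16 * 1024, &mlist, &mcount);
	if (!err) {
		/* 16KB of space is now spread across "mcount" mbufs in mlist */
	}
#endif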
/*
 * nfsm_chain_new_mbuf()
 *
 * Add a new mbuf to the given chain.
 */
int
nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint)
{
	mbuf_t mb;
	int error = 0;

	if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) {
		sizehint = nfs_mbuf_minclsize;
	}

	/* allocate a new mbuf */
	nfsm_mbuf_get(error, &mb, sizehint);
	if (error) {
		return error;
	}
	if (mb == NULL) {
		panic("got NULL mbuf?");
	}

	/* do we have a current mbuf? */
	if (nmc->nmc_mcur) {
		/* first cap off current mbuf */
		mbuf_setlen(nmc->nmc_mcur, nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur));
		/* then append the new mbuf */
		error = mbuf_setnext(nmc->nmc_mcur, mb);
		if (error) {
			mbuf_free(mb);
			return error;
		}
	}

	/* set up for using the new mbuf */
	nmc->nmc_mcur = mb;
	nmc->nmc_ptr = mbuf_data(mb);
	nmc->nmc_left = mbuf_trailingspace(mb);

	return 0;
}
/*
 * nfsm_chain_add_opaque_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
int
nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
{
	uint32_t paddedlen, tlen;
	int error;

	paddedlen = nfsm_rndup(len);
	while (paddedlen) {
		if (!nmc->nmc_left) {
			error = nfsm_chain_new_mbuf(nmc, paddedlen);
			if (error) {
				return error;
			}
		}
		tlen = MIN(nmc->nmc_left, paddedlen);
		if (len) {
			if (tlen > len) {
				tlen = len;
			}
			bcopy(buf, nmc->nmc_ptr, tlen);
		} else {
			bzero(nmc->nmc_ptr, tlen);
		}
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
		paddedlen -= tlen;
		if (len) {
			buf += tlen;
			len -= tlen;
		}
	}
	return 0;
}
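/*
 * Illustrative note (not in the original source): XDR opaque data is padded
 * with zero bytes to a 4-byte boundary, which is why the chain consumes
 * nfsm_rndup(len) bytes rather than len.
 */
#if 0	/* example only */
	static const u_char cookie[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

	/* consumes nfsm_rndup(5) == 8 bytes in the chain (5 data + 3 zero pad) */
	error = nfsm_chain_add_opaque_f(nmc, cookie, sizeof(cookie));
#endif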
/*
 * nfsm_chain_add_opaque_nopad_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain,
 * without XDR padding.
 */
int
nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
{
	uint32_t tlen;
	int error;

	while (len > 0) {
		if (nmc->nmc_left <= 0) {
			error = nfsm_chain_new_mbuf(nmc, len);
			if (error) {
				return error;
			}
		}
		tlen = MIN(nmc->nmc_left, len);
		bcopy(buf, nmc->nmc_ptr, tlen);
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
		len -= tlen;
		buf += tlen;
	}
	return 0;
}
/*
 * nfsm_chain_add_uio()
 *
 * Add "len" bytes of data from "uio" to the given chain.
 */
int
nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len)
{
	uint32_t paddedlen, tlen;
	int error;

	paddedlen = nfsm_rndup(len);
	while (paddedlen) {
		if (!nmc->nmc_left) {
			error = nfsm_chain_new_mbuf(nmc, paddedlen);
			if (error) {
				return error;
			}
		}
		tlen = MIN(nmc->nmc_left, paddedlen);
		if (len) {
			if (tlen > len) {
				tlen = len;
			}
			uiomove(nmc->nmc_ptr, tlen, uio);
		} else {
			bzero(nmc->nmc_ptr, tlen);
		}
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
		paddedlen -= tlen;
		if (len) {
			len -= tlen;
		}
	}
	return 0;
}
/*
 * Find the length of the NFS mbuf chain
 * up to the current encoding/decoding offset.
 */
size_t
nfsm_chain_offset(struct nfsm_chain *nmc)
{
	mbuf_t mb;
	size_t len = 0;

	for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) {
		if (mb == nmc->nmc_mcur) {
			return len + (nmc->nmc_ptr - (caddr_t)mbuf_data(mb));
		}
		len += mbuf_len(mb);
	}

	return len;
}
/*
 * nfsm_chain_advance()
 *
 * Advance an nfsm_chain by "len" bytes.
 */
int
nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len)
{
	mbuf_t mb;

	while (len) {
		if (nmc->nmc_left >= len) {
			nmc->nmc_left -= len;
			nmc->nmc_ptr += len;
			return 0;
		}
		len -= nmc->nmc_left;
		nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur);
		if (!mb) {
			return EBADRPC;
		}
		nmc->nmc_ptr = mbuf_data(mb);
		nmc->nmc_left = mbuf_len(mb);
	}

	return 0;
}
/*
 * nfsm_chain_reverse()
 *
 * Reverse decode offset in an nfsm_chain by "len" bytes.
 */
int
nfsm_chain_reverse(struct nfsm_chain *nmc, uint32_t len)
{
	uint32_t mlen, new_offset;
	int error = 0;

	mlen = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);
	if (len <= mlen) {
		nmc->nmc_ptr -= len;
		nmc->nmc_left += len;
		return 0;
	}

	new_offset = nfsm_chain_offset(nmc) - len;
	nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead);
	if (error) {
		return error;
	}

	return nfsm_chain_advance(nmc, new_offset);
}
/*
 * nfsm_chain_get_opaque_pointer_f()
 *
 * Return a pointer to the next "len" bytes of contiguous data in
 * the mbuf chain.  If the next "len" bytes are not contiguous, we
 * try to manipulate the mbuf chain so that it is.
 *
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
int
nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr)
{
	mbuf_t mbcur, mb;
	uint32_t left, need, mblen, cplen, padlen;
	u_char *ptr;
	int error = 0;

	/* move to next mbuf with data */
	while (nmc->nmc_mcur && (nmc->nmc_left == 0)) {
		mb = mbuf_next(nmc->nmc_mcur);
		nmc->nmc_mcur = mb;
		if (!mb) {
			break;
		}
		nmc->nmc_ptr = mbuf_data(mb);
		nmc->nmc_left = mbuf_len(mb);
	}
	/* check if we've run out of data */
	if (!nmc->nmc_mcur) {
		return EBADRPC;
	}

	/* do we already have a contiguous buffer? */
	if (nmc->nmc_left >= len) {
		/* the returned pointer will be the current pointer */
		*pptr = (u_char *)nmc->nmc_ptr;
		error = nfsm_chain_advance(nmc, nfsm_rndup(len));
		return error;
	}

	padlen = nfsm_rndup(len) - len;

	/* we need (len - left) more bytes */
	mbcur = nmc->nmc_mcur;
	left = nmc->nmc_left;
	need = len - left;

	if (need > mbuf_trailingspace(mbcur)) {
		/*
		 * The needed bytes won't fit in the current mbuf so we'll
		 * allocate a new mbuf to hold the contiguous range of data.
		 */
		nfsm_mbuf_get(error, &mb, len);
		if (error) {
			return error;
		}
		/* double check that this mbuf can hold all the data */
		if (mbuf_maxlen(mb) < len) {
			mbuf_free(mb);
			/* ... */
		}

		/* the returned pointer will be the new mbuf's data pointer */
		*pptr = ptr = mbuf_data(mb);

		/* copy "left" bytes to the new mbuf */
		bcopy(nmc->nmc_ptr, ptr, left);
		ptr += left;
		mbuf_setlen(mb, left);

		/* insert the new mbuf between the current and next mbufs */
		error = mbuf_setnext(mb, mbuf_next(mbcur));
		if (!error) {
			error = mbuf_setnext(mbcur, mb);
		}
		if (error) {
			mbuf_free(mb);
			return error;
		}

		/* reduce current mbuf's length by "left" */
		mbuf_setlen(mbcur, mbuf_len(mbcur) - left);

		/*
		 * update nmc's state to point at the end of the mbuf
		 * where the needed data will be copied to.
		 */
		nmc->nmc_mcur = mbcur = mb;
		nmc->nmc_left = 0;
		nmc->nmc_ptr = (caddr_t)ptr;
	} else {
		/* The rest of the data will fit in this mbuf. */

		/* the returned pointer will be the current pointer */
		*pptr = (u_char *)nmc->nmc_ptr;

		/*
		 * update nmc's state to point at the end of the mbuf
		 * where the needed data will be copied to.
		 */
		nmc->nmc_ptr += left;
		nmc->nmc_left = 0;
	}

	/*
	 * move the next "need" bytes into the current
	 * mbuf from the mbufs that follow
	 */

	/* extend current mbuf length */
	mbuf_setlen(mbcur, mbuf_len(mbcur) + need);

	/* mb follows mbufs we're copying/compacting data from */
	mb = mbuf_next(mbcur);

	while (need && mb) {
		/* copy as much as we need/can */
		ptr = mbuf_data(mb);
		mblen = mbuf_len(mb);
		cplen = MIN(mblen, need);
		if (cplen) {
			bcopy(ptr, nmc->nmc_ptr, cplen);
			/*
			 * update the mbuf's pointer and length to reflect that
			 * the data was shifted to an earlier mbuf in the chain
			 */
			error = mbuf_setdata(mb, ptr + cplen, mblen - cplen);
			if (error) {
				mbuf_setlen(mbcur, mbuf_len(mbcur) - need);
				return error;
			}
			/* update pointer/need */
			nmc->nmc_ptr += cplen;
			need -= cplen;
		}
		/* if more needed, go to next mbuf */
		if (need) {
			mb = mbuf_next(mb);
		}
	}

	/* did we run out of data in the mbuf chain? */
	if (need) {
		mbuf_setlen(mbcur, mbuf_len(mbcur) - need);
		return EBADRPC;
	}

	/*
	 * update nmc's state to point after this contiguous data
	 *
	 * "mb" points to the last mbuf we copied data from so we
	 * just set nmc to point at whatever remains in that mbuf.
	 */
	nmc->nmc_mcur = mb;
	nmc->nmc_ptr = mbuf_data(mb);
	nmc->nmc_left = mbuf_len(mb);

	/* move past any padding */
	if (padlen) {
		error = nfsm_chain_advance(nmc, padlen);
	}

	return error;
}
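/*
 * Illustrative sketch (not in the original source): a typical consumer of
 * nfsm_chain_get_opaque_pointer_f() is file-handle parsing, where the caller
 * wants "fhlen" bytes addressable as one flat buffer even if the reply was
 * split across mbufs.  Names below are hypothetical.
 */
#if 0	/* example only */
	u_char *fhdata;
	uint32_t fhlen = 32;

	error = nfsm_chain_get_opaque_pointer_f(nmc, fhlen, &fhdata);
	/*
	 * on success, fhdata points at 32 contiguous bytes and the chain has
	 * advanced by nfsm_rndup(32) == 32 bytes
	 */
#endif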
/*
 * nfsm_chain_get_opaque_f()
 *
 * Read the next "len" bytes in the chain into "buf".
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
int
nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf)
{
	uint32_t cplen, padlen;
	int error = 0;

	padlen = nfsm_rndup(len) - len;

	/* loop through mbufs copying all the data we need */
	while (len && nmc->nmc_mcur) {
		/* copy as much as we need/can */
		cplen = MIN(nmc->nmc_left, len);
		if (cplen) {
			bcopy(nmc->nmc_ptr, buf, cplen);
			nmc->nmc_ptr += cplen;
			nmc->nmc_left -= cplen;
			buf += cplen;
			len -= cplen;
		}
		/* if more needed, go to next mbuf */
		if (len) {
			mbuf_t mb = mbuf_next(nmc->nmc_mcur);
			nmc->nmc_mcur = mb;
			nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
			nmc->nmc_left = mb ? mbuf_len(mb) : 0;
		}
	}

	/* did we run out of data in the mbuf chain? */
	if (len) {
		return EBADRPC;
	}

	if (padlen) {
		nfsm_chain_adv(error, nmc, padlen);
	}

	return error;
}
/*
 * nfsm_chain_get_uio()
 *
 * Read the next "len" bytes in the chain into the given uio.
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
int
nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio)
{
	uint32_t cplen, padlen;
	int error = 0;

	padlen = nfsm_rndup(len) - len;

	/* loop through mbufs copying all the data we need */
	while (len && nmc->nmc_mcur) {
		/* copy as much as we need/can */
		cplen = MIN(nmc->nmc_left, len);
		if (cplen) {
			error = uiomove(nmc->nmc_ptr, cplen, uio);
			if (error) {
				break;
			}
			nmc->nmc_ptr += cplen;
			nmc->nmc_left -= cplen;
			len -= cplen;
		}
		/* if more needed, go to next mbuf */
		if (len) {
			mbuf_t mb = mbuf_next(nmc->nmc_mcur);
			nmc->nmc_mcur = mb;
			nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
			nmc->nmc_left = mb ? mbuf_len(mb) : 0;
		}
	}

	/* did we run out of data in the mbuf chain? */
	if (!error) {
		if (len) {
			error = EBADRPC;
		} else if (padlen) {
			nfsm_chain_adv(error, nmc, padlen);
		}
	}

	return error;
}
#if CONFIG_NFS_CLIENT

int
nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t slen)
{
	uint8_t smallbuf[64];
	uint8_t *nfcname = smallbuf;
	size_t buflen = sizeof(smallbuf), nfclen;
	int error;

	error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
	if (error == ENAMETOOLONG) {
		buflen = MAXPATHLEN;
		MALLOC_ZONE(nfcname, uint8_t *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
	}

	/* if we got an error, just use the original string */
	if (error) {
		error = 0;
		nfsm_chain_add_string(error, nmc, s, slen);
	} else {
		nfsm_chain_add_string(error, nmc, nfcname, nfclen);
	}

	if (nfcname && (nfcname != smallbuf)) {
		FREE_ZONE(nfcname, MAXPATHLEN, M_NAMEI);
	}
	return error;
}
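/*
 * Illustrative note (not in the original source): callers above the NFS client
 * may hand down decomposed UTF-8 (NFD); this helper precomposes names (NFC)
 * before they go on the wire so servers see one canonical spelling.  A
 * hypothetical call for the name "e" + COMBINING ACUTE ACCENT:
 */
#if 0	/* example only */
	const uint8_t name_nfd[] = { 'e', 0xcc, 0x81 };	/* "é" in NFD form */

	/* emitted as the 2-byte NFC form 0xc3 0xa9 if normalization succeeds */
	error = nfsm_chain_add_string_nfc(&nmreq, name_nfd, sizeof(name_nfd));
#endif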
/*
 * Add an NFSv2 "sattr" structure to an mbuf chain
 */
int
nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_t szrdev)
{
	int error = 0;

	nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type,
	    (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600)));
	nfsm_chain_add_32(error, nmc,
	    VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1);
	nfsm_chain_add_32(error, nmc,
	    VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1);
	nfsm_chain_add_32(error, nmc, szrdev);
	nfsm_chain_add_v2time(error, nmc,
	    VATTR_IS_ACTIVE(vap, va_access_time) ?
	    &vap->va_access_time : NULL);
	nfsm_chain_add_v2time(error, nmc,
	    VATTR_IS_ACTIVE(vap, va_modify_time) ?
	    &vap->va_modify_time : NULL);

	return error;
}
/*
 * Add an NFSv3 "sattr" structure to an mbuf chain
 */
int
nfsm_chain_add_v3sattr_f(
	__unused struct nfsmount *nmp,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
{
	int error = 0;

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_mode);
	} else {
		nfsm_chain_add_32(error, nmc, FALSE);
	}
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_uid);
	} else {
		nfsm_chain_add_32(error, nmc, FALSE);
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_gid);
	} else {
		nfsm_chain_add_32(error, nmc, FALSE);
	}
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_64(error, nmc, vap->va_data_size);
	} else {
		nfsm_chain_add_32(error, nmc, FALSE);
	}

	if (vap->va_vaflags & VA_UTIMES_NULL) {
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
	} else {
		if (VATTR_IS_ACTIVE(vap, va_access_time)) {
			nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
			nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_sec);
			nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec);
		} else {
			nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
		}
		if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
			nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
			nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_sec);
			nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec);
		} else {
			nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
		}
	}

	return error;
}
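/*
 * Illustrative sketch (not in the original source): each optional NFSv3 sattr
 * field is encoded as a boolean discriminator followed by the value only when
 * the attribute is active.
 */
#if 0	/* example only */
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, 0644);
	/* encodes as: TRUE, 0644, FALSE, FALSE, FALSE, DONT_CHANGE, DONT_CHANGE */
	error = nfsm_chain_add_v3sattr_f(nmp, &nmreq, &va);
#endif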
/*
 * nfsm_chain_get_fh_attr()
 *
 * Get the file handle and attributes from an mbuf chain. (NFSv2/v3)
 */
int
nfsm_chain_get_fh_attr(
	struct nfsmount *nmp,
	struct nfsm_chain *nmc,
	nfsnode_t dnp,
	vfs_context_t ctx,
	int nfsvers,
	uint64_t *xidp,
	fhandle_t *fhp,
	struct nfs_vattr *nvap)
{
	int error = 0, gotfh, gotattr;

	gotfh = gotattr = 1;

	if (nfsvers == NFS_VER3) {      /* check for file handle */
		nfsm_chain_get_32(error, nmc, gotfh);
	}
	if (!error && gotfh) {          /* get file handle */
		nfsm_chain_get_fh(error, nmc, nfsvers, fhp);
	} else {
		fhp->fh_len = 0;
	}
	if (nfsvers == NFS_VER3) {      /* check for file attributes */
		nfsm_chain_get_32(error, nmc, gotattr);
	}
	if (error) {
		return error;
	}
	if (gotattr) {
		if (!gotfh) {           /* skip attributes */
			nfsm_chain_adv(error, nmc, NFSX_V3FATTR);
		} else {                /* get attributes */
			error = nfs_parsefattr(nmp, nmc, nfsvers, nvap);
		}
	} else if (gotfh) {
		/* we need valid attributes in order to call nfs_nget() */
		if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp)) {
			gotattr = 0;
			fhp->fh_len = 0;
		}
	}

	return error;
}
/*
 * Get and process NFSv3 WCC data from an mbuf chain
 */
int
nfsm_chain_get_wcc_data_f(
	struct nfsm_chain *nmc,
	nfsnode_t np,
	struct timespec *premtime,
	int *newpostattr,
	u_int64_t *xidp)
{
	int error = 0;
	uint32_t flag = 0;

	nfsm_chain_get_32(error, nmc, flag);
	if (!error && flag) {
		nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
		nfsm_chain_get_32(error, nmc, premtime->tv_sec);
		nfsm_chain_get_32(error, nmc, premtime->tv_nsec);
		nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
	} else {
		premtime->tv_sec = 0;
		premtime->tv_nsec = 0;
	}
	nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp);

	return error;
}
/*
 * Get the next RPC transaction ID (XID)
 */
void
nfs_get_xid(uint64_t *xidp)
{
	struct timeval tv;

	lck_mtx_lock(nfs_request_mutex);
	if (!nfs_xid) {
		/*
		 * Derive initial xid from system time.
		 *
		 * Note: it's OK if this code inits nfs_xid to 0 (for example,
		 * due to a broken clock) because we immediately increment it
		 * and we guarantee to never use xid 0.  So, nfs_xid should only
		 * ever be 0 the first time this function is called.
		 */
		microtime(&tv);
		nfs_xid = tv.tv_sec << 12;
	}
	if (++nfs_xid == 0) {
		/* Skip zero xid if it should ever happen. */
		nfs_xidwrap++;
		nfs_xid++;
	}
	*xidp = nfs_xid + ((uint64_t)nfs_xidwrap << 32);
	lck_mtx_unlock(nfs_request_mutex);
}
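/*
 * Illustrative note (not in the original source): the 64-bit value handed back
 * combines a wrap counter (upper 32 bits) with the running 32-bit XID.  For a
 * hypothetical first call at tv_sec = 1000000 the seed is 1000000 << 12 =
 * 4096000000, so the first XID handed out is 4096000001 with nfs_xidwrap == 0.
 */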
/*
 * Build the RPC header and fill in the authorization info.
 * Returns the head of the mbuf list and the xid.
 */
int
nfsm_rpchead(
	struct nfsreq *req,
	mbuf_t mrest,
	u_int64_t *xidp,
	mbuf_t *mreqp)
{
	struct nfsmount *nmp = req->r_nmp;
	int nfsvers = nmp->nm_vers;
	int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum);

	return nfsm_rpchead2(nmp, nmp->nm_sotype, NFS_PROG, nfsvers, proc,
	    req->r_auth, req->r_cred, req, mrest, xidp, mreqp);
}
/*
 * get_auxiliary_groups:	Gets the supplementary groups from a credential.
 *
 * IN:		cred:	credential to get the associated groups from.
 * OUT:		groups:	An array of gids of NGROUPS size.
 * IN:		count:	The number of groups to get; i.e., the number of groups the server supports.
 *
 * returns:	The number of groups found.
 *
 * Just a wrapper around kauth_cred_getgroups to handle the case of a server
 * supporting fewer groups than NGROUPS.
 */
static int
get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count)
{
	gid_t pgid;
	int i, maxcount = count < NGROUPS ? count + 1 : NGROUPS;

	for (i = 0; i < NGROUPS; i++) {
		groups[i] = -2; /* Initialize to the nobody group */
	}
	(void)kauth_cred_getgroups(cred, groups, &maxcount);
	if (maxcount < 1) {
		return maxcount;
	}

	/*
	 * kauth_get_groups returns the primary group followed by the
	 * users auxiliary groups. If the number of groups the server supports
	 * is less than NGROUPS, then we will drop the first group so that
	 * we can send one more group over the wire.
	 */
	if (count < NGROUPS) {
		pgid = kauth_cred_getgid(cred);
		if (pgid == groups[0]) {
			maxcount -= 1;
			for (i = 0; i < maxcount; i++) {
				groups[i] = groups[i + 1];
			}
		}
	}

	return maxcount;
}
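/*
 * Illustrative sketch (not in the original source): with a server limited to
 * eight gids and a credential whose primary gid also appears first in the
 * group list, the primary entry is dropped so one more supplementary gid
 * still fits in the AUTH_SYS credential.  Names below are hypothetical.
 */
#if 0	/* example only */
	gid_t gids[NGROUPS];
	int ngroups;

	/* server advertises support for only 8 gids */
	ngroups = get_auxiliary_groups(req->r_cred, gids, 8);
#endif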
int
nfsm_rpchead2(__unused struct nfsmount *nmp, int sotype, int prog, int vers, int proc, int auth_type,
    kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp)
{
	mbuf_t mreq, mb;
	size_t headlen;
	int error, i, auth_len = 0, authsiz, reqlen;
	int groupcount = 0;
	struct nfsm_chain nmreq;
	gid_t grouplist[NGROUPS];

	/* calculate expected auth length */
	switch (auth_type) {
	case RPCAUTH_NONE:
		auth_len = 0;
		break;
	case RPCAUTH_SYS:
	{
		int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;

		if (!cred) {
			return EINVAL;
		}
		groupcount = get_auxiliary_groups(cred, grouplist, count);
		if (groupcount < 0) {
			return EINVAL;
		}
		auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
		break;
	}
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		if (!req || !cred) {
			return EINVAL;
		}
		auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now
		break;
#endif /* CONFIG_NFS_GSS */
	default:
		return EINVAL;
	}
	authsiz = nfsm_rndup(auth_len);

	/* allocate the packet */
	headlen = authsiz + 10 * NFSX_UNSIGNED;
	if (sotype == SOCK_STREAM) { /* also include room for any RPC Record Mark */
		headlen += NFSX_UNSIGNED;
	}
	if (headlen >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mreq);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
		if (!error) {
			if (headlen < nfs_mbuf_mhlen) {
				mbuf_align_32(mreq, headlen);
			} else {
				mbuf_align_32(mreq, 8 * NFSX_UNSIGNED);
			}
		}
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}

	/*
	 * If the caller gave us a non-zero XID then use it because
	 * it may be a higher-level resend with a GSSAPI credential.
	 * Otherwise, allocate a new one.
	 */
	if (*xidp == 0) {
		nfs_get_xid(xidp);
	}

	/* build the header(s) */
	nfsm_chain_init(&nmreq, mreq);

	/* First, if it's a TCP stream insert space for an RPC record mark */
	if (sotype == SOCK_STREAM) {
		nfsm_chain_add_32(error, &nmreq, 0);
	}

	/* Then the RPC header. */
	nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff));
	nfsm_chain_add_32(error, &nmreq, RPC_CALL);
	nfsm_chain_add_32(error, &nmreq, RPC_VER2);
	nfsm_chain_add_32(error, &nmreq, prog);
	nfsm_chain_add_32(error, &nmreq, vers);
	nfsm_chain_add_32(error, &nmreq, proc);

#if CONFIG_NFS_GSS
add_cred:
#endif
	switch (auth_type) {
	case RPCAUTH_NONE:
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* auth */
		nfsm_chain_add_32(error, &nmreq, 0);            /* length */
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */
		nfsm_chain_add_32(error, &nmreq, 0);            /* length */
		nfsm_chain_build_done(error, &nmreq);
		/* Append the args mbufs */
		if (!error) {
			error = mbuf_setnext(nmreq.nmc_mcur, mrest);
		}
		break;
	case RPCAUTH_SYS:
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_SYS);
		nfsm_chain_add_32(error, &nmreq, authsiz);
		nfsm_chain_add_32(error, &nmreq, 0);    /* stamp */
		nfsm_chain_add_32(error, &nmreq, 0);    /* zero-length hostname */
		nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred));      /* UID */
		nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred));      /* GID */
		nfsm_chain_add_32(error, &nmreq, groupcount);                   /* additional GIDs */
		for (i = 0; i < groupcount; i++) {
			nfsm_chain_add_32(error, &nmreq, grouplist[i]);
		}

		/* And the verifier... */
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */
		nfsm_chain_add_32(error, &nmreq, 0);            /* length */
		nfsm_chain_build_done(error, &nmreq);

		/* Append the args mbufs */
		if (!error) {
			error = mbuf_setnext(nmreq.nmc_mcur, mrest);
		}
		break;
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		error = nfs_gss_clnt_cred_put(req, &nmreq, mrest);
		if (error == ENEEDAUTH) {
			int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;

			/*
			 * Use sec=sys for this user
			 */
			error = 0;
			req->r_auth = auth_type = RPCAUTH_SYS;
			groupcount = get_auxiliary_groups(cred, grouplist, count);
			if (groupcount < 0) {
				return EINVAL;
			}
			auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
			authsiz = nfsm_rndup(auth_len);
			goto add_cred;
		}
		break;
#endif /* CONFIG_NFS_GSS */
	}

	/* finish setting up the packet */
	if (!error) {
		error = mbuf_pkthdr_setrcvif(mreq, 0);
	}
	if (error) {
		mbuf_freem(mreq);
		return error;
	}

	/* Calculate the size of the request */
	reqlen = 0;
	for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) {
		reqlen += mbuf_len(mb);
	}

	mbuf_pkthdr_setlen(mreq, reqlen);

	/*
	 * If the request goes on a TCP stream,
	 * set its size in the RPC record mark.
	 * The record mark count doesn't include itself
	 * and the last fragment bit is set.
	 */
	if (sotype == SOCK_STREAM) {
		nfsm_chain_set_recmark(error, &nmreq,
		    (reqlen - NFSX_UNSIGNED) | 0x80000000);
	}

	*mreqp = mreq;
	return 0;
}
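/*
 * Illustrative note (not in the original source): for a 132-byte request on a
 * TCP stream, the record mark word excludes itself, so the value written is
 * (132 - 4) | 0x80000000 == 0x80000080, i.e. "last fragment, 128 bytes follow".
 */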
/*
 * Parse an NFS file attribute structure out of an mbuf chain.
 */
int
nfs_parsefattr(
	__unused struct nfsmount *nmp,
	struct nfsm_chain *nmc,
	int nfsvers,
	struct nfs_vattr *nvap)
{
	int error = 0;
	enum vtype vtype;
	nfstype nvtype;
	uint32_t vmode, rdev, val;

	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TYPE);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_MODE);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SIZE);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SPACE_USED);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_RAWDEV);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FSID);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FILEID);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_ACCESS);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY);
	NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_METADATA);

	nfsm_chain_get_32(error, nmc, nvtype);
	nfsm_chain_get_32(error, nmc, vmode);
	if (error) {
		return error;
	}

	if (nfsvers == NFS_VER3) {
		nvap->nva_type = vtype = nfstov_type(nvtype, nfsvers);
	} else {
		/*
		 * The duplicate information returned in fa_type and fa_mode
		 * is an ambiguity in the NFS version 2 protocol.
		 *
		 * VREG should be taken literally as a regular file.  If a
		 * server intends to return some type information differently
		 * in the upper bits of the mode field (e.g. for sockets, or
		 * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
		 * leave the examination of the mode bits even in the VREG
		 * case to avoid breakage for bogus servers, but we make sure
		 * that there are actually type bits set in the upper part of
		 * fa_mode (and failing that, trust the va_type field).
		 *
		 * NFSv3 cleared the issue, and requires fa_mode to not
		 * contain any type information (while also introducing
		 * sockets and FIFOs for fa_type).
		 */
		vtype = nfstov_type(nvtype, nfsvers);
		if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) {
			vtype = IFTOVT(vmode);
		}
		nvap->nva_type = vtype;
	}

	nvap->nva_mode = (vmode & 07777);

	nfsm_chain_get_32(error, nmc, nvap->nva_nlink);
	nfsm_chain_get_32(error, nmc, nvap->nva_uid);
	nfsm_chain_get_32(error, nmc, nvap->nva_gid);

	if (nfsvers == NFS_VER3) {
		nfsm_chain_get_64(error, nmc, nvap->nva_size);
		nfsm_chain_get_64(error, nmc, nvap->nva_bytes);
		nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata1);
		nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata2);
		nfsm_chain_get_64(error, nmc, nvap->nva_fsid.major);
		nvap->nva_fsid.minor = 0;
		nfsm_chain_get_64(error, nmc, nvap->nva_fileid);
	} else {
		nfsm_chain_get_32(error, nmc, nvap->nva_size);
		nfsm_chain_adv(error, nmc, NFSX_UNSIGNED);
		nfsm_chain_get_32(error, nmc, rdev);
		nvap->nva_rawdev.specdata1 = major(rdev);
		nvap->nva_rawdev.specdata2 = minor(rdev);
		nfsm_chain_get_32(error, nmc, val); /* blocks */
		nvap->nva_bytes = val * NFS_FABLKSIZE;
		nfsm_chain_get_32(error, nmc, val);
		nvap->nva_fsid.major = (uint64_t)val;
		nvap->nva_fsid.minor = 0;
		nfsm_chain_get_32(error, nmc, val);
		nvap->nva_fileid = (uint64_t)val;
		/* Really ugly NFSv2 kludge. */
		if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) {
			nvap->nva_type = VFIFO;
		}
	}
	nfsm_chain_get_time(error, nmc, nfsvers,
	    nvap->nva_timesec[NFSTIME_ACCESS],
	    nvap->nva_timensec[NFSTIME_ACCESS]);
	nfsm_chain_get_time(error, nmc, nfsvers,
	    nvap->nva_timesec[NFSTIME_MODIFY],
	    nvap->nva_timensec[NFSTIME_MODIFY]);
	nfsm_chain_get_time(error, nmc, nfsvers,
	    nvap->nva_timesec[NFSTIME_CHANGE],
	    nvap->nva_timensec[NFSTIME_CHANGE]);

	return error;
}
/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the value pointed to by nvap, unless the file type in the attribute
 * cache doesn't match the file type in the nvap, in which case log a
 * warning and return ESTALE.
 *
 * If the dontshrink flag is set, then it's not safe to call ubc_setsize()
 * to shrink the size of the file.
 */
int
nfs_loadattrcache(
	nfsnode_t np,
	struct nfs_vattr *nvap,
	u_int64_t *xidp,
	int dontshrink)
{
	mount_t mp;
	vnode_t vp;
	struct timeval now;
	struct nfs_vattr *npnvap;
	int xattr = np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR;
	int referral = np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL;
	int aclbit, monitored, error = 0;
	kauth_acl_t acl;
	struct nfsmount *nmp;
	uint32_t events = np->n_events;

	if (np->n_hflag & NHINIT) {
		vp = NULL;
		mp = np->n_mount;
	} else {
		vp = NFSTOV(np);
		mp = vnode_mount(vp);
	}
	monitored = vp ? vnode_ismonitored(vp) : 0;

	FSDBG_TOP(527, np, vp, *xidp >> 32, *xidp);

	if (!((nmp = VFSTONFS(mp)))) {
		FSDBG_BOT(527, ENXIO, 1, 0, *xidp);
		return ENXIO;
	}

	if (*xidp < np->n_xid) {
		/*
		 * We have already updated attributes with a response from
		 * a later request.  The attributes we have here are probably
		 * stale so we drop them (just return).  However, our
		 * out-of-order receipt could be correct - if the requests were
		 * processed out of order at the server.  Given the uncertainty
		 * we invalidate our cached attributes.  *xidp is zeroed here
		 * to indicate the attributes were dropped - only getattr
		 * cares - it needs to retry the rpc.
		 */
		NATTRINVALIDATE(np);
		FSDBG_BOT(527, 0, np, np->n_xid, *xidp);
		*xidp = 0;
		return 0;
	}

	if (vp && (nvap->nva_type != vnode_vtype(vp))) {
		/*
		 * The filehandle has changed type on us.  This can be
		 * caused by either the server not having unique filehandles
		 * or because another client has removed the previous
		 * filehandle and a new object (of a different type)
		 * has been created with the same filehandle.
		 *
		 * We can't simply switch the type on the vnode because
		 * there may be type-specific fields that need to be
		 * cleaned up or set up.
		 *
		 * So, what should we do with this vnode?
		 *
		 * About the best we can do is log a warning and return
		 * an error.  ESTALE is about the closest error, but it
		 * is a little strange that we come up with this error
		 * internally instead of simply passing it through from
		 * the server.  Hopefully, the vnode will be reclaimed
		 * soon so the filehandle can be reincarnated as the new
		 * object type.
		 */
		printf("nfs loadattrcache vnode changed type, was %d now %d\n",
		    vnode_vtype(vp), nvap->nva_type);
		error = ESTALE;
		if (monitored) {
			events |= VNODE_EVENT_DELETE;
		}
		goto out;
	}

	npnvap = &np->n_vattr;

	/*
	 * The ACL cache needs special handling because it is not
	 * always updated.  Save current ACL cache state so it can
	 * be restored after copying the new attributes into place.
	 */
	aclbit = NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL);
	acl = npnvap->nva_acl;

	if (monitored) {
		/*
		 * For monitored nodes, check for attribute changes that should generate events.
		 */
		if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS) &&
		    (nvap->nva_nlink != npnvap->nva_nlink)) {
			events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_LINK;
		}
		if (events & VNODE_EVENT_PERMS) {
			/* no need to do all the checking if it's already set */;
		} else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) &&
		    (nvap->nva_mode != npnvap->nva_mode)) {
			events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
		} else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
		    (nvap->nva_uid != npnvap->nva_uid)) {
			events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
		} else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
		    (nvap->nva_gid != npnvap->nva_gid)) {
			events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
		} else if (nmp->nm_vers >= NFS_VER4) {
			if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
			    !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) {
				events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
			} else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
			    !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) {
				events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
			} else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) &&
			    nvap->nva_acl && npnvap->nva_acl &&
			    ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) ||
			    bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) {
				events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
			}
		}
		if (((nmp->nm_vers >= NFS_VER4) && (nvap->nva_change != npnvap->nva_change)) ||
		    (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) &&
		    ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) ||
		    (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) {
			events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_RAWDEV) &&
		    ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) ||
		    (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) {
			events |= VNODE_EVENT_ATTRIB;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_FILEID) &&
		    (nvap->nva_fileid != npnvap->nva_fileid)) {
			events |= VNODE_EVENT_ATTRIB;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ARCHIVE) &&
		    ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) {
			events |= VNODE_EVENT_ATTRIB;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_HIDDEN) &&
		    ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) {
			events |= VNODE_EVENT_ATTRIB;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_CREATE) &&
		    ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) ||
		    (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) {
			events |= VNODE_EVENT_ATTRIB;
		}
		if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) &&
		    ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) ||
		    (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) {
			events |= VNODE_EVENT_ATTRIB;
		}
	}

	/* Copy the attributes to the attribute cache */
	bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap));

	microuptime(&now);
	np->n_attrstamp = now.tv_sec;
	np->n_xid = *xidp;
	/* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... */
	if (vp && xattr) {
		nvap->nva_flags |= xattr;
	}
	if (vp && referral) {
		nvap->nva_flags |= referral;
	}

	if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* we're updating the ACL */
		if (nvap->nva_acl) {
			/* make a copy of the acl for the cache */
			npnvap->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
			if (npnvap->nva_acl) {
				bcopy(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
			} else {
				/* can't make a copy to cache, invalidate ACL cache */
				NFS_BITMAP_CLR(npnvap->nva_bitmap, NFS_FATTR_ACL);
				aclbit = 0;
			}
		}
		if (acl) {
			kauth_acl_free(acl);
			acl = NULL;
		}
	}
	if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) {
		/* update the ACL timestamp */
		np->n_aclstamp = now.tv_sec;
	} else {
		/* we aren't updating the ACL, so restore original values */
		if (aclbit) {
			NFS_BITMAP_SET(npnvap->nva_bitmap, NFS_FATTR_ACL);
		}
		npnvap->nva_acl = acl;
	}

#if CONFIG_TRIGGERS
#if CONFIG_NFS4
	/*
	 * For NFSv4, if the fsid doesn't match the fsid for the mount, then
	 * this node is for a different file system on the server.  So we mark
	 * this node as a trigger node that will trigger the mirror mount.
	 */
	if ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_type == VDIR) &&
	    ((np->n_vattr.nva_fsid.major != nmp->nm_fsid.major) ||
	    (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) {
		np->n_vattr.nva_flags |= NFS_FFLAG_TRIGGER;
	}
#endif /* CONFIG_NFS4 */
#endif /* CONFIG_TRIGGERS */

	if (!vp || (nvap->nva_type != VREG)) {
		np->n_size = nvap->nva_size;
	} else if (nvap->nva_size != np->n_size) {
		FSDBG(527, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
		if (!UBCINFOEXISTS(vp) || (dontshrink && (nvap->nva_size < np->n_size))) {
			/* asked not to shrink, so stick with current size */
			FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
			nvap->nva_size = np->n_size;
			NATTRINVALIDATE(np);
		} else if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
			/* if we've modified, stick with larger size */
			FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
			nvap->nva_size = np->n_size;
			npnvap->nva_size = np->n_size;
		} else {
			/*
			 * n_size is protected by the data lock, so we need to
			 * defer updating it until it's safe.  We save the new size
			 * and set a flag and it'll get updated the next time we get/drop
			 * the data lock or the next time we do a getattr.
			 */
			np->n_newsize = nvap->nva_size;
			SET(np->n_flag, NUPDATESIZE);
			if (monitored) {
				events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_EXTEND;
			}
		}
	}

	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			nvap->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
			nvap->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
		}
		if (np->n_flag & NUPD) {
			nvap->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
			nvap->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
		}
	}

out:
	if (monitored && events) {
		nfs_vnode_notify(np, events);
	}
	FSDBG_BOT(527, error, np, np->n_size, *xidp);
	return error;
}
/*
 * Calculate the attribute timeout based on
 * how recently the file has been modified.
 */
int
nfs_attrcachetimeout(nfsnode_t np)
{
	struct nfsmount *nmp;
	struct timeval now;
	int isdir;
	int timeo;

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return 0;
	}

	isdir = vnode_isdir(NFSTOV(np));

	if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
		/* If we have a delegation, we always use the max timeout. */
		timeo = isdir ? nmp->nm_acdirmax : nmp->nm_acregmax;
	} else if ((np)->n_flag & NMODIFIED) {
		/* If we have modifications, we always use the min timeout. */
		timeo = isdir ? nmp->nm_acdirmin : nmp->nm_acregmin;
	} else {
		/* Otherwise, we base the timeout on how old the file seems. */
		/* Note that if the client and server clocks are way out of sync, */
		/* timeout will probably get clamped to a min or max value */
		microtime(&now);
		timeo = (now.tv_sec - (np)->n_vattr.nva_timesec[NFSTIME_MODIFY]) / 10;
		if (isdir) {
			if (timeo < nmp->nm_acdirmin) {
				timeo = nmp->nm_acdirmin;
			} else if (timeo > nmp->nm_acdirmax) {
				timeo = nmp->nm_acdirmax;
			}
		} else {
			if (timeo < nmp->nm_acregmin) {
				timeo = nmp->nm_acregmin;
			} else if (timeo > nmp->nm_acregmax) {
				timeo = nmp->nm_acregmax;
			}
		}
	}

	return timeo;
}
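/*
 * Illustrative note (not in the original source): assuming acregmin=5s and
 * acregmax=60s, a regular file whose cached mtime is 100 seconds old gets
 * timeo = 100 / 10 = 10 seconds; one modified 1000 seconds ago would compute
 * 100 seconds but be clamped down to acregmax = 60.
 */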
/*
 * Check the attribute cache time stamp.
 * If the cache is valid, copy contents to *nvaper and return 0
 * otherwise return an error.
 * Must be called with the node locked.
 */
int
nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags)
{
	struct nfs_vattr *nvap;
	struct timeval nowup;
	int timeo;
	struct nfsmount *nmp;

	/* Check if the attributes are valid. */
	if (!NATTRVALID(np) || ((flags & NGA_ACL) && !NACLVALID(np))) {
		FSDBG(528, np, 0, 0xffffff01, ENOENT);
		OSAddAtomic64(1, &nfsstats.attrcache_misses);
		return ENOENT;
	}

	nmp = NFSTONMP(np);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}

	/*
	 * Verify the cached attributes haven't timed out.
	 * If the server isn't responding, skip the check
	 * and return cached attributes.
	 */
	if (!nfs_use_cache(nmp)) {
		microuptime(&nowup);
		if (np->n_attrstamp > nowup.tv_sec) {
			printf("NFS: Attribute time stamp is in the future by %ld seconds. Invalidating cache\n",
			    np->n_attrstamp - nowup.tv_sec);
			NATTRINVALIDATE(np);
			NACCESSINVALIDATE(np);
			return ENOENT;
		}
		timeo = nfs_attrcachetimeout(np);
		if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
			FSDBG(528, np, 0, 0xffffff02, ENOENT);
			OSAddAtomic64(1, &nfsstats.attrcache_misses);
			return ENOENT;
		}
		if ((flags & NGA_ACL) && ((nowup.tv_sec - np->n_aclstamp) >= timeo)) {
			FSDBG(528, np, 0, 0xffffff02, ENOENT);
			OSAddAtomic64(1, &nfsstats.attrcache_misses);
			return ENOENT;
		}
	}

	nvap = &np->n_vattr;
	FSDBG(528, np, nvap->nva_size, np->n_size, 0xcace);
	OSAddAtomic64(1, &nfsstats.attrcache_hits);

	if (nvap->nva_type != VREG) {
		np->n_size = nvap->nva_size;
	} else if (nvap->nva_size != np->n_size) {
		FSDBG(528, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
		if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
			/* if we've modified, stick with larger size */
			nvap->nva_size = np->n_size;
		} else {
			/*
			 * n_size is protected by the data lock, so we need to
			 * defer updating it until it's safe.  We save the new size
			 * and set a flag and it'll get updated the next time we get/drop
			 * the data lock or the next time we do a getattr.
			 */
			np->n_newsize = nvap->nva_size;
			SET(np->n_flag, NUPDATESIZE);
		}
	}

	bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			nvaper->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
			nvaper->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
		}
		if (np->n_flag & NUPD) {
			nvaper->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
			nvaper->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
		}
	}
	if (nvap->nva_acl) {
		if (flags & NGA_ACL) {
			nvaper->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
			if (!nvaper->nva_acl) {
				return ENOMEM;
			}
			bcopy(nvap->nva_acl, nvaper->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
		} else {
			nvaper->nva_acl = NULL;
		}
	}
	return 0;
}
/*
 * When creating file system objects:
 * Don't bother setting UID if it's the same as the credential performing the create.
 * Don't bother setting GID if it's the same as the directory or credential.
 */
void
nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
{
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid) {
			VATTR_CLEAR_ACTIVE(vap, va_uid);
			VATTR_CLEAR_ACTIVE(vap, va_uuuid);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
		    (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid)) {
			VATTR_CLEAR_ACTIVE(vap, va_gid);
			VATTR_CLEAR_ACTIVE(vap, va_guuid);
		}
	}
}
/*
 * Convert a universal address string to a sockaddr structure.
 *
 * Universal addresses can be in the following formats:
 *
 * d = decimal (IPv4)
 * x = hexadecimal (IPv6)
 * p = port (decimal)
 *
 * d.d.d.d
 * d.d.d.d.p.p
 * x:x:x:x:x:x:x:x
 * x:x:x:x:x:x:x:x.p.p
 * x:x:x:x:x:x:d.d.d.d
 * x:x:x:x:x:x:d.d.d.d.p.p
 *
 * IPv6 strings can also have a series of zeroes elided
 * IPv6 strings can also have a %scope suffix at the end (after any port)
 *
 * rules & exceptions:
 * - value before : is hex
 * - value before . is dec
 * - once . hit, all values are dec
 * - hex+port case means value before first dot is actually hex
 * - . is always preceded by digits except if last hex was double-colon
 *
 * scan, converting #s to bytes
 * first time a . is encountered, scan the rest to count them.
 * 2 dots = just port
 * 3 dots = just IPv4 no port
 * 5 dots = IPv4 and port
 */

#define IS_DIGIT(C) \
	(((C) >= '0') && ((C) <= '9'))

#define IS_XDIGIT(C) \
	(IS_DIGIT(C) || \
	(((C) >= 'A') && ((C) <= 'F')) || \
	(((C) >= 'a') && ((C) <= 'f')))

int
nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr)
{
	const char *p, *pd;     /* pointers to current character in scan */
	const char *pnum;       /* pointer to current number to decode */
	const char *pscope;     /* pointer to IPv6 scope ID */
	uint8_t a[18];          /* octet array to store address bytes */
	int i;                  /* index of next octet to decode */
	int dci;                /* index of octet to insert double-colon zeroes */
	int dcount, xdcount;    /* count of digits in current number */
	int needmore;           /* set when we know we need more input (e.g. after colon, period) */
	int dots;               /* # of dots */
	int hex;                /* contains hex values */
	unsigned long val;      /* decoded value */
	int s;                  /* index used for sliding array to insert elided zeroes */

	/* AF_LOCAL address are paths that start with '/' or are empty */
	if (*uaddr == '/' || *uaddr == '\0') { /* AF_LOCAL address */
		struct sockaddr_un *sun = (struct sockaddr_un *)addr;
		sun->sun_family = AF_LOCAL;
		sun->sun_len = sizeof(struct sockaddr_un);
		strlcpy(sun->sun_path, uaddr, sizeof(sun->sun_path));
		return 1;
	}

#define HEXVALUE        0
#define DECIMALVALUE    1
#define GET(TYPE) \
	do { \
		if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \
			return 0; \
		if (((TYPE) == DECIMALVALUE) && xdcount) \
			return 0; \
		val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 10 : 16); \
		if (((TYPE) == DECIMALVALUE) && (val >= 256)) \
			return 0; \
		/* check if there is room left in the array */ \
		if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \
			return 0; \
		if ((TYPE) == HEXVALUE) \
			a[i++] = ((val >> 8) & 0xff); \
		a[i++] = (val & 0xff); \
	} while (0)

	hex = 0;
	dots = 0;
	dci = -1;
	i = dcount = xdcount = 0;
	needmore = 1;
	pnum = p = uaddr;
	pscope = NULL;

	if ((*p == ':') && (*++p != ':')) { /* if it starts with colon, gotta be a double */
		return 0;
	}

	while (*p) {
		if (IS_XDIGIT(*p)) {
			dcount++;
			if (!IS_DIGIT(*p)) {
				xdcount++;
			}
			needmore = 0;
			p++;
		} else if (*p == '.') {
			/* rest is decimal IPv4 dotted quad and/or port */
			if (!dots) {
				/* this is the first, so count them */
				for (pd = p; *pd; pd++) {
					if (*pd == '.') {
						if (++dots > 5) {
							return 0;
						}
					} else if (hex && (*pd == '%')) {
						break;
					} else if ((*pd < '0') || (*pd > '9')) {
						return 0;
					}
				}
				if ((dots != 2) && (dots != 3) && (dots != 5)) {
					return 0;
				}
				if (hex && (dots == 2)) { /* hex+port */
					if (!dcount && needmore) {
						return 0;
					}
					if (dcount) { /* last hex may be elided zero */
						GET(HEXVALUE);
					}
				} else {
					GET(DECIMALVALUE);
				}
			} else {
				GET(DECIMALVALUE);
			}
			dcount = xdcount = 0;
			needmore = 1;
			pnum = ++p;
		} else if (*p == ':') {
			hex = 1;
			if (dots) {
				return 0;
			}
			if (!dcount) { /* missing number, probably double colon */
				if (dci >= 0) { /* can only have one double colon */
					return 0;
				}
				dci = i;
				needmore = 0;
			} else {
				GET(HEXVALUE);
			}
			dcount = xdcount = 0;
			needmore = 1;
			pnum = ++p;
		} else if (*p == '%') { /* scope ID delimiter */
			if (!hex) {
				return 0;
			}
			pscope = ++p;
			break;
		} else { /* unexpected character */
			return 0;
		}
	}
	if (needmore && !dcount) {
		return 0;
	}
	if (dcount) { /* decode trailing number */
		GET(dots ? DECIMALVALUE : HEXVALUE);
	}
	if (dci >= 0) { /* got a double-colon at i, need to insert a range of zeroes */
		/* if we got a port, slide to end of array */
		/* otherwise, slide to end of address (non-port) values */
		int end = ((dots == 2) || (dots == 5)) ? sizeof(a) : (sizeof(a) - 2);
		if (i % 2) { /* length of zero range must be multiple of 2 */
			return 0;
		}
		if (i >= end) { /* no room? */
			return 0;
		}
		/* slide (i-dci) numbers up from index dci */
		for (s = 0; s < (i - dci); s++) {
			a[end - 1 - s] = a[i - 1 - s];
		}
		/* zero (end-i) numbers at index dci */
		for (s = 0; s < (end - i); s++) {
			a[dci + s] = 0;
		}
		i = end;
	}

	/* copy out resulting socket address */
	if (hex) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
		if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a) - 2)))) {
			return 0;
		}
		if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) {
			return 0;
		}
		bzero(sin6, sizeof(struct sockaddr_in6));
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_family = AF_INET6;
		bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr));
		if ((dots == 5) || (dots == 2)) {
			sin6->sin6_port = htons((a[16] << 8) | a[17]);
		}
		if (pscope) {
			for (p = pscope; IS_DIGIT(*p); p++) {
				;
			}
			if (*p && !IS_DIGIT(*p)) { /* name */
				ifnet_t interface = NULL;
				if (ifnet_find_by_name(pscope, &interface) == 0) {
					sin6->sin6_scope_id = ifnet_index(interface);
				}
				if (interface) {
					ifnet_release(interface);
				}
			} else { /* decimal number */
				sin6->sin6_scope_id = strtoul(pscope, NULL, 10);
			}
			/* XXX should we also embed scope id for linklocal? */
		}
	} else {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;
		if ((dots != 3) && (dots != 5)) {
			return 0;
		}
		if ((dots == 3) && (i != 4)) {
			return 0;
		}
		if ((dots == 5) && (i != 6)) {
			return 0;
		}
		bzero(sin, sizeof(struct sockaddr_in));
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_family = AF_INET;
		bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr));
		if (dots == 5) {
			sin->sin_port = htons((a[4] << 8) | a[5]);
		}
	}
	return 1;
}
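/*
 * Illustrative sketch (not in the original source): the trailing ".p.p" octets
 * encode the port high/low bytes, so "192.168.1.20.8.1" parses to the IPv4
 * address 192.168.1.20 with port (8 << 8) | 1 == 2049, the standard NFS port.
 */
#if 0	/* example only */
	struct sockaddr_storage ss;

	if (nfs_uaddr2sockaddr("192.168.1.20.8.1", (struct sockaddr *)&ss)) {
		/* ss now holds an AF_INET sockaddr for 192.168.1.20:2049 */
	}
#endif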
/* NFS Client debugging support */
uint32_t nfs_debug_ctl;

#include <libkern/libkern.h>

void
nfs_printf(unsigned int facility, unsigned int level, const char *fmt, ...)
{
	va_list ap;

	if (NFS_IS_DBG(facility, level)) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}
}

#define DISPLAYLEN 16

static int
isprint(int ch)
{
	return ch >= 0x20 && ch <= 0x7e;
}
static void
hexdump(void *data, size_t len)
{
	size_t i, j;
	unsigned char *d = data;
	char *p, disbuf[3 * DISPLAYLEN + 1];

	for (i = 0; i < len; i += DISPLAYLEN) {
		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
			snprintf(p, 4, "%2.2x ", d[i + j]);
		}
		for (; j < DISPLAYLEN; j++, p += 3) {
			snprintf(p, 4, "   ");
		}
		printf("%s ", disbuf);
		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p++) {
			snprintf(p, 2, "%c", isprint(d[i + j]) ? d[i + j] : '.');
		}
		printf("%s\n", disbuf);
	}
}
void
nfs_dump_mbuf(const char *func, int lineno, const char *msg, mbuf_t mb)
{
	mbuf_t m;

	printf("%s:%d %s\n", func, lineno, msg);
	for (m = mb; m; m = mbuf_next(m)) {
		hexdump(mbuf_data(m), mbuf_len(m));
	}
}
/* Is a mount gone away? */
int
nfs_mount_gone(struct nfsmount *nmp)
{
	return !nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD));
}
/*
 * Return some of the more significant mount options
 * as a string, e.g. "ro,hard,intr,tcp,vers=3,sec=krb5,deadtimeout=0"
 */
int
nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen)
{
	int c;

	c = snprintf(buf, buflen, "%s,%s,%s,%s,vers=%d,sec=%s,%sdeadtimeout=%d",
	    (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw",
	    NMFLAG(nmp, SOFT) ? "soft" : "hard",
	    NMFLAG(nmp, INTR) ? "intr" : "nointr",
	    nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp",
	    nmp->nm_vers,
	    nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" :
	    nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" :
	    nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" :
	    nmp->nm_auth == RPCAUTH_SYS ? "sys" : "none",
	    nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," :
	    nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," :
	    nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "",
	    nmp->nm_deadtimeout);

	return c > buflen ? ENOMEM : 0;
}
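/*
 * Illustrative note (not in the original source): a hypothetical TCP NFSv3
 * mount using AUTH_SYS with locking enabled would format roughly as
 * "rw,hard,nointr,tcp,vers=3,sec=sys,locks,deadtimeout=0".
 */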
#endif /* CONFIG_NFS_CLIENT */
/*
 * Schedule a callout thread to run an NFS timer function
 * interval milliseconds in the future.
 */
void
nfs_interval_timer_start(thread_call_t call, int interval)
{
	uint64_t deadline;

	clock_interval_to_deadline(interval, 1000 * 1000, &deadline);
	thread_call_enter_delayed(call, deadline);
}
#if CONFIG_NFS_SERVER

int nfsrv_cmp_secflavs(struct nfs_sec *, struct nfs_sec *);
int nfsrv_hang_addrlist(struct nfs_export *, struct user_nfs_export_args *);
int nfsrv_free_netopt(struct radix_node *, void *);
int nfsrv_free_addrlist(struct nfs_export *, struct user_nfs_export_args *);
struct nfs_export_options *nfsrv_export_lookup(struct nfs_export *, mbuf_t);
struct nfs_export *nfsrv_fhtoexport(struct nfs_filehandle *);
struct nfs_user_stat_node *nfsrv_get_user_stat_node(struct nfs_active_user_list *, struct sockaddr *, uid_t);
void nfsrv_init_user_list(struct nfs_active_user_list *);
void nfsrv_free_user_list(struct nfs_active_user_list *);
/*
 * add NFSv3 WCC data to an mbuf chain
 */
int
nfsm_chain_add_wcc_data_f(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	int preattrerr,
	struct vnode_attr *prevap,
	int postattrerr,
	struct vnode_attr *postvap)
{
	int error = 0;

	if (preattrerr) {
		nfsm_chain_add_32(error, nmc, FALSE);
	} else {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_64(error, nmc, prevap->va_data_size);
		nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_modify_time);
		nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_change_time);
	}
	nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap);

	return error;
}
/*
 * Extract a lookup path from the given mbufs and store it in
 * a newly allocated buffer saved in the given nameidata structure.
 */
int
nfsm_chain_get_path_namei(
	struct nfsm_chain *nmc,
	uint32_t len,
	struct nameidata *nip)
{
	struct componentname *cnp = &nip->ni_cnd;
	int error = 0;
	char *cp;

	if (len > (MAXPATHLEN - 1)) {
		return ENAMETOOLONG;
	}

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (!cnp->cn_pnbuf) {
		return ENOMEM;
	}
	cnp->cn_pnlen = MAXPATHLEN;
	cnp->cn_flags |= HASBUF;

	/* Copy the name from the mbuf list to the string */
	cp = cnp->cn_pnbuf;
	nfsm_chain_get_opaque(error, nmc, len, cp);
	if (error) {
		goto out;
	}
	cnp->cn_pnbuf[len] = '\0';

	/* sanity check the string */
	if ((strlen(cp) != len) || strchr(cp, '/')) {
		error = EACCES;
	}
out:
	if (error) {
		if (cnp->cn_pnbuf) {
			FREE_ZONE(cnp->cn_pnbuf, MAXPATHLEN, M_NAMEI);
		}
		cnp->cn_flags &= ~HASBUF;
	} else {
		nip->ni_pathlen = len;
	}
	return error;
}
/*
 * Set up nameidata for a lookup() call and do it.
 */
int
nfsrv_namei(
	struct nfsrv_descript *nd,
	vfs_context_t ctx,
	struct nameidata *nip,
	struct nfs_filehandle *nfhp,
	vnode_t *retdirp,
	struct nfs_export **nxp,
	struct nfs_export_options **nxop)
{
	vnode_t dp;
	int error;
	struct componentname *cnp = &nip->ni_cnd;
	uint32_t cnflags;
	char *tmppn;

	*retdirp = NULL;

	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop);
	if (error) {
		goto out;
	}
	error = nfsrv_credcheck(nd, ctx, *nxp, *nxop);
	if (error || (vnode_vtype(dp) != VDIR)) {
		vnode_put(dp);
		error = error ? error : ENOTDIR;
		goto out;
	}
	*retdirp = dp;

	nip->ni_cnd.cn_context = ctx;

	if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) {
		cnp->cn_flags |= RDONLY;
	}

	cnp->cn_flags |= NOCROSSMOUNT;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	nip->ni_usedvp = nip->ni_startdir = dp;
	nip->ni_rootdir = rootvnode;

	/*
	 * And call lookup() to do the real work
	 */
	cnflags = nip->ni_cnd.cn_flags; /* store in case we have to restore */
	while ((error = lookup(nip)) == ERECYCLE) {
		nip->ni_cnd.cn_flags = cnflags;
		cnp->cn_nameptr = cnp->cn_pnbuf;
		nip->ni_usedvp = nip->ni_dvp = nip->ni_startdir = dp;
	}
	if (error) {
		goto out;
	}

	/* Check for encountering a symbolic link */
	if (cnp->cn_flags & ISSYMLINK) {
		if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) {
			vnode_put(nip->ni_dvp);
		}
		if (nip->ni_vp) {
			vnode_put(nip->ni_vp);
			nip->ni_vp = NULL;
		}
		error = EINVAL;
	}
out:
	if (error) {
		tmppn = cnp->cn_pnbuf;
		cnp->cn_pnbuf = NULL;
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI);
	}
	return error;
}
/*
 * A fiddled version of m_adj() that ensures null fill to a 4-byte
 * boundary and only trims off the back end
 */
void
nfsm_adj(mbuf_t mp, int len, int nul)
{
	mbuf_t m, mnext;
	int count, i, mlen;
	char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += mbuf_len(m);
		mnext = mbuf_next(m);
		if (mnext == NULL) {
			break;
		}
		m = mnext;
	}
	mlen = mbuf_len(m);
	if (mlen > len) {
		mlen -= len;
		mbuf_setlen(m, mlen);
		if (nul > 0) {
			cp = (caddr_t)mbuf_data(m) + mlen - nul;
			for (i = 0; i < nul; i++) {
				*cp++ = '\0';
			}
		}
		return;
	}
	count -= len;
	if (count < 0) {
		count = 0;
	}
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = mbuf_next(m)) {
		mlen = mbuf_len(m);
		if (mlen >= count) {
			mlen = count;
			mbuf_setlen(m, count);
			if (nul > 0) {
				cp = (caddr_t)mbuf_data(m) + mlen - nul;
				for (i = 0; i < nul; i++) {
					*cp++ = '\0';
				}
			}
			break;
		}
		count -= mlen;
	}
	for (m = mbuf_next(m); m; m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}
}
/*
 * Trim the header out of the mbuf list and trim off any trailing
 * junk so that the mbuf list has only the write data.
 */
nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
{
    int cnt = 0, dlen, adjust;

    for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) {

    /* trim current mbuf */
    data = mbuf_data(m);
    adjust = nmc->nmc_ptr - data;
    if ((dlen > 0) && (adjust > 0)) {
        if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) {
        mbuf_setlen(m, dlen);

    /* skip next len bytes */
    for (; m && (cnt < len); m = mbuf_next(m)) {

    /* truncate to end of data */
    mbuf_setlen(m, dlen - (cnt - len));
    if (m == nmc->nmc_mcur) {
        nmc->nmc_left -= (cnt - len);

    /* trim any trailing data */
    if (m == nmc->nmc_mcur) {

    for (; m; m = mbuf_next(m)) {
nfsm_chain_add_fattr(
    struct nfsrv_descript *nd,
    struct nfsm_chain *nmc,
    struct vnode_attr *vap)
{
    // XXX Should we assert here that all fields are supported?
    nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
    if (nd->nd_vers == NFS_VER3) {
        nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
    } else {
        nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
    }
    nfsm_chain_add_32(error, nmc, vap->va_nlink);
    nfsm_chain_add_32(error, nmc, vap->va_uid);
    nfsm_chain_add_32(error, nmc, vap->va_gid);
    if (nd->nd_vers == NFS_VER3) {
        nfsm_chain_add_64(error, nmc, vap->va_data_size);
        nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
        nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
        nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
        nfsm_chain_add_64(error, nmc, vap->va_fsid);
        nfsm_chain_add_64(error, nmc, vap->va_fileid);
    } else {
        nfsm_chain_add_32(error, nmc, vap->va_data_size);
        nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
        if (vap->va_type == VFIFO) {
            nfsm_chain_add_32(error, nmc, 0xffffffff);
        } else {
            nfsm_chain_add_32(error, nmc, vap->va_rdev);
        }
        nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
        nfsm_chain_add_32(error, nmc, vap->va_fsid);
        nfsm_chain_add_32(error, nmc, vap->va_fileid);
    }
    nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
    nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
    nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);
nfsm_chain_get_sattr(
    struct nfsrv_descript *nd,
    struct nfsm_chain *nmc,
    struct vnode_attr *vap)
{
    struct timespec now;

    if (nd->nd_vers == NFS_VER2) {
        /*
         * There is/was a bug in the Sun client that puts 0xffff in the mode
         * field of sattr when it should put in 0xffffffff. The u_short
         * doesn't sign extend. So check the low order 2 bytes for 0xffff.
         */
        nfsm_chain_get_32(error, nmc, val);
        if ((val & 0xffff) != 0xffff) {
            VATTR_SET(vap, va_mode, val & 07777);
            /* save the "type" bits for NFSv2 create */
            VATTR_SET(vap, va_type, IFTOVT(val));
            VATTR_CLEAR_ACTIVE(vap, va_type);
        }
        nfsm_chain_get_32(error, nmc, val);
        if (val != (uint32_t)-1) {
            VATTR_SET(vap, va_uid, val);
        }
        nfsm_chain_get_32(error, nmc, val);
        if (val != (uint32_t)-1) {
            VATTR_SET(vap, va_gid, val);
        }
        /* save the "size" bits for NFSv2 create (even if they appear unset) */
        nfsm_chain_get_32(error, nmc, val);
        VATTR_SET(vap, va_data_size, val);
        if (val == (uint32_t)-1) {
            VATTR_CLEAR_ACTIVE(vap, va_data_size);
        }
        nfsm_chain_get_time(error, nmc, NFS_VER2,
            vap->va_access_time.tv_sec,
            vap->va_access_time.tv_nsec);
        if (vap->va_access_time.tv_sec != -1) {
            VATTR_SET_ACTIVE(vap, va_access_time);
        }
        nfsm_chain_get_time(error, nmc, NFS_VER2,
            vap->va_modify_time.tv_sec,
            vap->va_modify_time.tv_nsec);
        if (vap->va_modify_time.tv_sec != -1) {
            VATTR_SET_ACTIVE(vap, va_modify_time);
        }

    nfsm_chain_get_32(error, nmc, val);
    nfsm_chain_get_32(error, nmc, val);
    VATTR_SET(vap, va_mode, val & 07777);
    nfsm_chain_get_32(error, nmc, val);
    nfsm_chain_get_32(error, nmc, val);
    VATTR_SET(vap, va_uid, val);
    nfsm_chain_get_32(error, nmc, val);
    nfsm_chain_get_32(error, nmc, val);
    VATTR_SET(vap, va_gid, val);
    nfsm_chain_get_32(error, nmc, val);
    nfsm_chain_get_64(error, nmc, val64);
    VATTR_SET(vap, va_data_size, val64);
    nfsm_chain_get_32(error, nmc, val);
    case NFS_TIME_SET_TO_CLIENT:
        nfsm_chain_get_time(error, nmc, nd->nd_vers,
            vap->va_access_time.tv_sec,
            vap->va_access_time.tv_nsec);
        VATTR_SET_ACTIVE(vap, va_access_time);
        vap->va_vaflags &= ~VA_UTIMES_NULL;
    case NFS_TIME_SET_TO_SERVER:
        VATTR_SET(vap, va_access_time, now);
        vap->va_vaflags |= VA_UTIMES_NULL;
    nfsm_chain_get_32(error, nmc, val);
    case NFS_TIME_SET_TO_CLIENT:
        nfsm_chain_get_time(error, nmc, nd->nd_vers,
            vap->va_modify_time.tv_sec,
            vap->va_modify_time.tv_nsec);
        VATTR_SET_ACTIVE(vap, va_modify_time);
        vap->va_vaflags &= ~VA_UTIMES_NULL;
    case NFS_TIME_SET_TO_SERVER:
        VATTR_SET(vap, va_modify_time, now);
        if (!VATTR_IS_ACTIVE(vap, va_access_time)) {
            vap->va_vaflags |= VA_UTIMES_NULL;
        }
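/*
 * Illustrative note (not in the original source): the low-16-bit check in
 * the NFSv2 branch above treats the correct "unset" sentinel and the
 * buggy Sun one the same way.  A client that stores -1 in a u_short mode
 * field sends 0x0000ffff (no sign extension), while a correct client
 * sends 0xffffffff; in either case (val & 0xffff) == 0xffff, so the mode
 * is simply left inactive instead of being misread as a real mode value.
 */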
/*
 * Compare two security flavor structs
 */
nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
{
    if (sf1->count != sf2->count) {
        return 1;
    }
    for (i = 0; i < sf1->count; i++) {
        if (sf1->flavors[i] != sf2->flavors[i]) {
            return 1;
        }
    }
    return 0;
/*
 * Build hash lists of net addresses and hang them off the NFS export.
 * Called by nfsrv_export() to set up the lists of export addresses.
 */
nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
    struct nfs_export_net_args nxna;
    struct nfs_netopt *no, *rn_no;
    struct radix_node_head *rnh;
    struct radix_node *rn;
    struct sockaddr *saddr, *smask;

    uaddr = unxa->nxa_nets;
    for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
        error = copyin(uaddr, &nxna, sizeof(nxna));

        if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) ||
            nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) ||
            nxna.nxna_addr.ss_family > AF_MAX ||
            nxna.nxna_mask.ss_family > AF_MAX) {

        if (nxna.nxna_flags & (NX_MAPROOT | NX_MAPALL)) {
            struct posix_cred temp_pcred;
            bzero(&temp_pcred, sizeof(temp_pcred));
            temp_pcred.cr_uid = nxna.nxna_cred.cr_uid;
            temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
            for (i = 0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) {
                temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
            }
            cred = posix_cred_create(&temp_pcred);
            if (!IS_VALID_CRED(cred)) {

        if (nxna.nxna_addr.ss_len == 0) {
            /* No address means this is a default/world export */
            if (nx->nx_flags & NX_DEFAULTEXPORT) {
                if (IS_VALID_CRED(cred)) {
                    kauth_cred_unref(&cred);
                }
            }
            nx->nx_flags |= NX_DEFAULTEXPORT;
            nx->nx_defopt.nxo_flags = nxna.nxna_flags;
            nx->nx_defopt.nxo_cred = cred;
            bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));
        }

        i = sizeof(struct nfs_netopt);
        i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
        MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
        if (IS_VALID_CRED(cred)) {
            kauth_cred_unref(&cred);
        }
        bzero(no, sizeof(struct nfs_netopt));
        no->no_opt.nxo_flags = nxna.nxna_flags;
        no->no_opt.nxo_cred = cred;
        bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));

        saddr = (struct sockaddr *)(no + 1);
        bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
        if (nxna.nxna_mask.ss_len) {
            smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
            bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);
        }
        i = saddr->sa_family;
        if ((rnh = nx->nx_rtable[i]) == 0) {
            /*
             * Seems silly to initialize every AF when most are not
             * used, do so on demand here
             */
            TAILQ_FOREACH(dom, &domains, dom_entry) {
                if (dom->dom_family == i && dom->dom_rtattach) {
                    dom->dom_rtattach((void **)&nx->nx_rtable[i],

            if ((rnh = nx->nx_rtable[i]) == 0) {
                if (IS_VALID_CRED(cred)) {
                    kauth_cred_unref(&cred);
                }
                _FREE(no, M_NETADDR);

        rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
        /*
         * One of the reasons that rnh_addaddr may fail is that
         * the entry already exists. To check for this case, we
         * look up the entry to see if it is there. If so, we
         * do not need to make a new entry but do continue.
         *
         * XXX should this be rnh_lookup() instead?
         */
        rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
        rn_no = (struct nfs_netopt *)rn;
        if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
            (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
            (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
            kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
            if (cred == cred2) {
                /* creds are same (or both NULL) */
            } else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) {
                /*
                 * Now compare the effective and
                 * supplementary groups...
                 *
                 * Note: This comparison, as written,
                 * does not correctly indicate that
                 * the groups are equivalent, since
                 * other than the first supplementary
                 * group, which is also the effective
                 * group, order on the remaining groups
                 * doesn't matter, and this is an
                 * ordered compare.
                 */
                gid_t groups[NGROUPS];
                gid_t groups2[NGROUPS];
                int groupcount = NGROUPS;
                int group2count = NGROUPS;

                if (!kauth_cred_getgroups(cred, groups, &groupcount) &&
                    !kauth_cred_getgroups(cred2, groups2, &group2count) &&
                    groupcount == group2count) {
                    for (i = 0; i < group2count; i++) {
                        if (groups[i] != groups2[i]) {
                    if (i >= group2count || i >= NGROUPS) {

            if (IS_VALID_CRED(cred)) {
                kauth_cred_unref(&cred);
            }
            _FREE(no, M_NETADDR);
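/*
 * Illustrative sketch (not in the original source): as the comment above
 * notes, the member-by-member loop is an ordered compare.  A set-style
 * compare that ignores the order of the supplementary groups might look
 * roughly like the following, keeping the effective group (index 0)
 * position-sensitive and matching the rest in any order:
 *
 *     int
 *     groups_equivalent(gid_t *g1, int n1, gid_t *g2, int n2)
 *     {
 *         int i, j, found;
 *
 *         if (n1 != n2 || g1[0] != g2[0]) {
 *             return 0;
 *         }
 *         for (i = 1; i < n1; i++) {
 *             found = 0;
 *             for (j = 1; j < n2; j++) {
 *                 if (g1[i] == g2[j]) {
 *                     found = 1;
 *                     break;
 *                 }
 *             }
 *             if (!found) {
 *                 return 0;
 *             }
 *         }
 *         return 1;
 *     }
 *
 * groups_equivalent() is a hypothetical helper shown only to make the
 * caveat concrete; the code above intentionally keeps the simpler
 * ordered compare.
 */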
/*
 * In order to properly track an export's netopt count, we need to pass
 * an additional argument to nfsrv_free_netopt() so that it can decrement
 * the export's netopt count.
 */
struct nfsrv_free_netopt_arg {
    struct radix_node_head *rnh;
    uint32_t *cnt;
};

nfsrv_free_netopt(struct radix_node *rn, void *w)
{
    struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
    struct radix_node_head *rnh = fna->rnh;
    uint32_t *cnt = fna->cnt;
    struct nfs_netopt *nno = (struct nfs_netopt *)rn;

    (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
    if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
        kauth_cred_unref(&nno->no_opt.nxo_cred);
    }
    _FREE((caddr_t)rn, M_NETADDR);
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
    struct nfs_export_net_args nxna;
    struct radix_node_head *rnh;
    struct radix_node *rn;
    struct nfsrv_free_netopt_arg fna;
    struct nfs_netopt *nno;

    if (!unxa || !unxa->nxa_netcount) {
        /* delete everything */
        for (i = 0; i <= AF_MAX; i++) {
            if ((rnh = nx->nx_rtable[i])) {
                fna.cnt = &nx->nx_expcnt;
                (*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
                _FREE((caddr_t)rnh, M_RTABLE);
                nx->nx_rtable[i] = 0;
            }
        }
    }

    /* delete only the exports specified */
    uaddr = unxa->nxa_nets;
    for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
        error = copyin(uaddr, &nxna, sizeof(nxna));

        if (nxna.nxna_addr.ss_len == 0) {
            /* No address means this is a default/world export */
            if (nx->nx_flags & NX_DEFAULTEXPORT) {
                nx->nx_flags &= ~NX_DEFAULTEXPORT;
                if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
                    kauth_cred_unref(&nx->nx_defopt.nxo_cred);
                }
            }
        }

        if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
            /* AF not initialized? */
            if (!(unxa->nxa_flags & NXA_ADD)) {
                printf("nfsrv_free_addrlist: address not found (0)\n");
            }
        }

        rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
            nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
        if (!rn || (rn->rn_flags & RNF_ROOT)) {
            if (!(unxa->nxa_flags & NXA_ADD)) {
                printf("nfsrv_free_addrlist: address not found (1)\n");
            }
        }

        (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
        nno = (struct nfs_netopt *)rn;
        if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
            kauth_cred_unref(&nno->no_opt.nxo_cred);
        }
        _FREE((caddr_t)rn, M_NETADDR);

        if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
            /* no more entries in rnh, so free it up */
            _FREE((caddr_t)rnh, M_RTABLE);
            nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
        }
void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX

nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx)
{
    struct nfs_exportfs *nxfs, *nxfs2, *nxfs3;
    struct nfs_export *nx, *nx2, *nx3;
    struct nfs_filehandle nfh;
    struct nameidata mnd, xnd;
    vnode_t mvp = NULL, xvp = NULL;
    char path[MAXPATHLEN];
    char fl_pathbuff[MAXPATHLEN];
    int fl_pathbuff_len = MAXPATHLEN;

    if (unxa->nxa_flags == NXA_CHECK) {
        /* just check if the path is an NFS-exportable file system */
        error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
        NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
            UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
        error = namei(&mnd);
        mp = vnode_mount(mvp);
        /* make sure it's the root of a file system */
        if (!vnode_isvroot(mvp)) {
        /* make sure the file system is NFS-exportable */
        nfh.nfh_len = NFSV3_MAX_FID_SIZE;
        error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
        if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
        if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
    }

    /* all other operations: must be super user */
    if ((error = vfs_context_suser(ctx))) {
        return error;
    }

    if (unxa->nxa_flags & NXA_DELETE_ALL) {
        /* delete all exports on all file systems */
        lck_rw_lock_exclusive(&nfsrv_export_rwlock);
        while ((nxfs = LIST_FIRST(&nfsrv_exports))) {
            mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
            vfs_clearflags(mp, MNT_EXPORTED);
            /* delete all exports on this file system */
            while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
                LIST_REMOVE(nx, nx_next);
                LIST_REMOVE(nx, nx_hash);
                /* delete all netopts for this export */
                nfsrv_free_addrlist(nx, NULL);
                nx->nx_flags &= ~NX_DEFAULTEXPORT;
                if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
                    kauth_cred_unref(&nx->nx_defopt.nxo_cred);
                }
                /* free active user list for this export */
                nfsrv_free_user_list(&nx->nx_user_list);
                FREE(nx->nx_path, M_TEMP);
            }
            LIST_REMOVE(nxfs, nxfs_next);
            FREE(nxfs->nxfs_path, M_TEMP);
        }
        if (nfsrv_export_hashtbl) {
            /* all exports deleted, clean up export hash table */
            FREE(nfsrv_export_hashtbl, M_TEMP);
            nfsrv_export_hashtbl = NULL;
        }
        lck_rw_done(&nfsrv_export_rwlock);
    }
    error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);

    lck_rw_lock_exclusive(&nfsrv_export_rwlock);

    /* init export hash table if not already */
    if (!nfsrv_export_hashtbl) {
        if (nfsrv_export_hash_size <= 0) {
            nfsrv_export_hash_size = NFSRVEXPHASHSZ;
        }
        nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash);
    }

    // first check if we've already got an exportfs with the given ID
    LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
        if (nxfs->nxfs_id == unxa->nxa_fsid) {
        /* verify exported FS path matches given path */
        if (strncmp(path, nxfs->nxfs_path, MAXPATHLEN)) {
        if ((unxa->nxa_flags & (NXA_ADD | NXA_OFFLINE)) == NXA_ADD) {
            /* find exported FS root vnode */
            NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
                UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx);
            error = namei(&mnd);
            /* make sure it's (still) the root of a file system */
            if (!vnode_isvroot(mvp)) {
            /* if adding, verify that the mount is still what we expect */
            mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
            /* check for firmlink-free path */
            if (vn_getpath_no_firmlink(mvp, fl_pathbuff, &fl_pathbuff_len) == 0 &&
                fl_pathbuff_len > 0 &&
                !strncmp(nxfs->nxfs_path, fl_pathbuff, MAXPATHLEN)) {
                mp = vfs_getvfs_by_mntonname(vnode_mount(mvp)->mnt_vfsstat.f_mntonname);
            }
            /* sanity check: this should be same mount */
            if (mp != vnode_mount(mvp)) {
        }
    }

    /* no current exported file system with that ID */
    if (!(unxa->nxa_flags & NXA_ADD)) {

    /* find exported FS root vnode */
    NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
    error = namei(&mnd);
    if (!(unxa->nxa_flags & NXA_OFFLINE)) {
    /* make sure it's the root of a file system */
    if (!vnode_isvroot(mvp)) {
        /* bail if not marked offline */
        if (!(unxa->nxa_flags & NXA_OFFLINE)) {
    }
    mp = vnode_mount(mvp);

    /* make sure the file system is NFS-exportable */
    nfh.nfh_len = NFSV3_MAX_FID_SIZE;
    error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
    if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
    if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {

    /* add an exportfs for it */
    MALLOC(nxfs, struct nfs_exportfs *, sizeof(struct nfs_exportfs), M_TEMP, M_WAITOK);
    bzero(nxfs, sizeof(struct nfs_exportfs));
    nxfs->nxfs_id = unxa->nxa_fsid;
    MALLOC(nxfs->nxfs_path, char*, pathlen, M_TEMP, M_WAITOK);
    if (!nxfs->nxfs_path) {
    bcopy(path, nxfs->nxfs_path, pathlen);
    /* insert into list in reverse-sorted order */
    LIST_FOREACH(nxfs2, &nfsrv_exports, nxfs_next) {
        if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) {
    LIST_INSERT_BEFORE(nxfs2, nxfs, nxfs_next);
    LIST_INSERT_AFTER(nxfs3, nxfs, nxfs_next);
    LIST_INSERT_HEAD(&nfsrv_exports, nxfs, nxfs_next);

    /* make sure any quotas are enabled before we export the file system */
    enablequotas(mp, ctx);
    if (unxa->nxa_exppath) {
        error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, &pathlen);
        LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
            if (nx->nx_id == unxa->nxa_expid) {
            /* verify exported FS path matches given path */
            if (strncmp(path, nx->nx_path, MAXPATHLEN)) {
        }

        /* no current export with that ID */
        if (!(unxa->nxa_flags & NXA_ADD)) {

        /* add an export for it */
        MALLOC(nx, struct nfs_export *, sizeof(struct nfs_export), M_TEMP, M_WAITOK);
        bzero(nx, sizeof(struct nfs_export));
        nx->nx_id = unxa->nxa_expid;
        microtime(&nx->nx_exptime);
        MALLOC(nx->nx_path, char*, pathlen, M_TEMP, M_WAITOK);
        bcopy(path, nx->nx_path, pathlen);
        /* initialize the active user list */
        nfsrv_init_user_list(&nx->nx_user_list);
        /* insert into list in reverse-sorted order */
        LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
            if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) {
        LIST_INSERT_BEFORE(nx2, nx, nx_next);
        LIST_INSERT_AFTER(nx3, nx, nx_next);
        LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next);
        /* insert into hash */
        LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash);

        /*
         * We don't allow/support nested exports.  Check if the new entry
         * nests with the entries before and after or if there's an
         * entry for the file system root and subdirs.
         */
        if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) &&
            (nx3->nx_path[pathlen - 1] == '/')) ||
            (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) &&
            (nx->nx_path[strlen(nx2->nx_path)] == '/'))) {

        /* check export conflict with fs root export and vice versa */
        expisroot = !nx->nx_path[0] ||
            ((nx->nx_path[0] == '.') && !nx->nx_path[1]);
        LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
        } else if (!nx2->nx_path[0]) {
        } else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) {

        /*
         * Don't actually return an error because mountd is
         * probably about to delete the conflicting export.
         * This can happen when a new export momentarily conflicts
         * with an old export while the transition is being made.
         * Theoretically, mountd could be written to avoid this
         * transient situation - but it would greatly increase the
         * complexity of mountd for very little overall benefit.
         */
        printf("nfsrv_export: warning: nested exports: %s/%s\n",
            nxfs->nxfs_path, nx->nx_path);
        nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;

    /* make sure file handle is set up */
    if ((nx->nx_fh.nfh_xh.nxh_version != htonl(NFS_FH_VERSION)) ||
        (nx->nx_fh.nfh_xh.nxh_flags & NXHF_INVALIDFH)) {
        /* try to set up export root file handle */
        nx->nx_fh.nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
        nx->nx_fh.nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
        nx->nx_fh.nfh_xh.nxh_expid = htonl(nx->nx_id);
        nx->nx_fh.nfh_xh.nxh_flags = 0;
        nx->nx_fh.nfh_xh.nxh_reserved = 0;
        nx->nx_fh.nfh_fhp = (u_char *)&nx->nx_fh.nfh_xh;
        bzero(&nx->nx_fh.nfh_fid[0], NFSV2_MAX_FID_SIZE);

        /* find export root vnode */
        if (!nx->nx_path[0] || ((nx->nx_path[0] == '.') && !nx->nx_path[1])) {
            /* exporting file system's root directory */

        xnd.ni_cnd.cn_nameiop = LOOKUP;
        xnd.ni_op = OP_LOOKUP;
        xnd.ni_cnd.cn_flags = LOCKLEAF;
        xnd.ni_pathlen = pathlen - 1;
        xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path;
        xnd.ni_startdir = mvp;
        xnd.ni_usedvp = mvp;
        xnd.ni_rootdir = rootvnode;
        xnd.ni_cnd.cn_context = ctx;
        while ((error = lookup(&xnd)) == ERECYCLE) {
            xnd.ni_cnd.cn_flags = LOCKLEAF;
            xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf;
            xnd.ni_usedvp = xnd.ni_dvp = xnd.ni_startdir = mvp;
        }
        if (vnode_vtype(xvp) != VDIR) {

        /* grab file handle */
        nx->nx_fh.nfh_len = NFSV3_MAX_FID_SIZE;
        error = VFS_VPTOFH(xvp, (int*)&nx->nx_fh.nfh_len, &nx->nx_fh.nfh_fid[0], NULL);
        if (!error && (nx->nx_fh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
        nx->nx_fh.nfh_xh.nxh_fidlen = nx->nx_fh.nfh_len;
        nx->nx_fh.nfh_len += sizeof(nx->nx_fh.nfh_xh);

        nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
        nx->nx_fh.nfh_xh.nxh_fidlen = 0;
        nx->nx_fh.nfh_len = sizeof(nx->nx_fh.nfh_xh);
    }
    /* perform the export changes */
    if (unxa->nxa_flags & NXA_DELETE) {
        /* delete all exports on this file system */
        while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
            LIST_REMOVE(nx, nx_next);
            LIST_REMOVE(nx, nx_hash);
            /* delete all netopts for this export */
            nfsrv_free_addrlist(nx, NULL);
            nx->nx_flags &= ~NX_DEFAULTEXPORT;
            if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
                kauth_cred_unref(&nx->nx_defopt.nxo_cred);
            }
            /* delete active user list for this export */
            nfsrv_free_user_list(&nx->nx_user_list);
            FREE(nx->nx_path, M_TEMP);
        }
    } else if (!unxa->nxa_netcount) {
        /* delete all netopts for this export */
        nfsrv_free_addrlist(nx, NULL);
        nx->nx_flags &= ~NX_DEFAULTEXPORT;
        if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
            kauth_cred_unref(&nx->nx_defopt.nxo_cred);
        }
    } else {
        /* delete only the netopts for the given addresses */
        error = nfsrv_free_addrlist(nx, unxa);
    }

    if (unxa->nxa_flags & NXA_ADD) {
        /*
         * If going offline set the export time so that when
         * coming back on line we will present a new write verifier
         */
        if (unxa->nxa_flags & NXA_OFFLINE) {
            microtime(&nx->nx_exptime);
        }
        error = nfsrv_hang_addrlist(nx, unxa);
        vfs_setflags(mp, MNT_EXPORTED);
    }

    if (nx && !nx->nx_expcnt) {
        /* export has no export options */
        LIST_REMOVE(nx, nx_next);
        LIST_REMOVE(nx, nx_hash);
        /* delete active user list for this export */
        nfsrv_free_user_list(&nx->nx_user_list);
        FREE(nx->nx_path, M_TEMP);
    }
    if (LIST_EMPTY(&nxfs->nxfs_exports)) {
        /* exported file system has no more exports */
        LIST_REMOVE(nxfs, nxfs_next);
        FREE(nxfs->nxfs_path, M_TEMP);
        vfs_clearflags(mp, MNT_EXPORTED);
    }

    lck_rw_done(&nfsrv_export_rwlock);
/*
 * Check if there is at least one export that will allow this address.
 *
 * Return 0 if there is an export that will allow this address,
 * else return EACCES.
 */
nfsrv_check_exports_allow_address(mbuf_t nam)
{
    struct nfs_exportfs *nxfs;
    struct nfs_export *nx;
    struct nfs_export_options *nxo = NULL;

    lck_rw_lock_shared(&nfsrv_export_rwlock);
    LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
        LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
            /* A little optimizing by checking for the default first */
            if (nx->nx_flags & NX_DEFAULTEXPORT) {
                nxo = &nx->nx_defopt;
            }
            if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) {
        }
    }
    lck_rw_done(&nfsrv_export_rwlock);

    return nxo ? 0 : EACCES;
struct nfs_export_options *
nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam)
{
    struct nfs_export_options *nxo = NULL;
    struct nfs_netopt *no = NULL;
    struct radix_node_head *rnh;
    struct sockaddr *saddr;

    /* Lookup in the export list first. */
    saddr = mbuf_data(nam);
    if (saddr->sa_family > AF_MAX) {
        /* Bogus sockaddr? Don't match anything. */

    rnh = nx->nx_rtable[saddr->sa_family];
    no = (struct nfs_netopt *)
        (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
    if (no && no->no_rnodes->rn_flags & RNF_ROOT) {

    /* If no address match, use the default if it exists. */
    if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) {
        nxo = &nx->nx_defopt;
    }
/* find an export for the given handle */
nfsrv_fhtoexport(struct nfs_filehandle *nfhp)
{
    struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
    struct nfs_export *nx;
    uint32_t fsid, expid;

    if (!nfsrv_export_hashtbl) {

    fsid = ntohl(nxh->nxh_fsid);
    expid = ntohl(nxh->nxh_expid);
    nx = NFSRVEXPHASH(fsid, expid)->lh_first;
    for (; nx; nx = LIST_NEXT(nx, nx_hash)) {
        if (nx->nx_fs->nxfs_id != fsid) {
        if (nx->nx_id != expid) {
struct nfsrv_getvfs_by_mntonname_callback_args {
    const char *path;   /* IN */
    mount_t mp;         /* OUT */
};

nfsrv_getvfs_by_mntonname_callback(mount_t mp, void *v)
{
    struct nfsrv_getvfs_by_mntonname_callback_args * const args = v;
    char real_mntonname[MAXPATHLEN];
    int pathbuflen = MAXPATHLEN;

    error = VFS_ROOT(mp, &rvp, vfs_context_current());
    error = vn_getpath_ext(rvp, NULLVP, real_mntonname, &pathbuflen,
        VN_GETPATH_FSENTER | VN_GETPATH_NO_FIRMLINK);
    if (strcmp(args->path, real_mntonname) == 0) {
        error = vfs_busy(mp, LK_NOWAIT);
        return VFS_RETURNED_DONE;
    }
    return VFS_RETURNED;
}

nfsrv_getvfs_by_mntonname(char *path)
{
    struct nfsrv_getvfs_by_mntonname_callback_args args = {

    mp = vfs_getvfs_by_mntonname(path);
    error = vfs_busy(mp, LK_NOWAIT);
    } else if (vfs_iterate(0, nfsrv_getvfs_by_mntonname_callback,
/*
 * nfsrv_fhtovp() - convert FH to vnode and export info
 */
nfsrv_fhtovp(
    struct nfs_filehandle *nfhp,
    struct nfsrv_descript *nd,
    struct nfs_export **nxp,
    struct nfs_export_options **nxop)
{
    struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
    struct nfs_export_options *nxo;

    v = ntohl(nxh->nxh_version);
    if (v != NFS_FH_VERSION) {
        /* file handle format not supported */

    if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) {
    if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) {
    v = ntohs(nxh->nxh_flags);
    if (v & NXHF_INVALIDFH) {

    *nxp = nfsrv_fhtoexport(nfhp);

    /* Get the export option structure for this <export, client> tuple. */
    *nxop = nxo = nfsrv_export_lookup(*nxp, nam);
    if (nam && (*nxop == NULL)) {

    /* Validate the security flavor of the request */
    for (i = 0, valid = 0; i < nxo->nxo_sec.count; i++) {
        if (nd->nd_sec == nxo->nxo_sec.flavors[i]) {

    /*
     * RFC 2623 section 2.3.2 recommends no authentication
     * requirement for certain NFS procedures used for mounting.
     * This allows an unauthenticated superuser on the client
     * to do mounts for the benefit of authenticated users.
     */
    if (nd->nd_vers == NFS_VER2) {
        if (nd->nd_procnum == NFSV2PROC_GETATTR ||
            nd->nd_procnum == NFSV2PROC_STATFS) {
    }
    if (nd->nd_vers == NFS_VER3) {
        if (nd->nd_procnum == NFSPROC_FSINFO) {
    }
    return NFSERR_AUTHERR | AUTH_REJECTCRED;

    if (nxo && (nxo->nxo_flags & NX_OFFLINE)) {
        return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
    }

    /* find mount structure */
    mp = nfsrv_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path);
    /*
     * We have an export, but no mount?
     * Perhaps the export just hasn't been marked offline yet.
     */
    return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;

    fidp = nfhp->nfh_fhp + sizeof(*nxh);
    error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL);

    /* vnode pointer should be good at this point or ... */
/*
 * nfsrv_credcheck() - check/map credentials according
 * to given export options.
 */
nfsrv_credcheck(
    struct nfsrv_descript *nd,
    __unused struct nfs_export *nx,
    struct nfs_export_options *nxo)
{
    if (nxo && nxo->nxo_cred) {
        if ((nxo->nxo_flags & NX_MAPALL) ||
            ((nxo->nxo_flags & NX_MAPROOT) && !suser(nd->nd_cr, NULL))) {
            kauth_cred_ref(nxo->nxo_cred);
            kauth_cred_unref(&nd->nd_cr);
            nd->nd_cr = nxo->nxo_cred;
        }
    }
    ctx->vc_ucred = nd->nd_cr;
/*
 * nfsrv_vptofh() - convert vnode to file handle for given export
 *
 * If the caller is passing in a vnode for a ".." directory entry,
 * they can pass a directory NFS file handle (dnfhp) which will be
 * checked against the root export file handle.  If it matches, we
 * refuse to provide the file handle for the out-of-export directory.
 */
nfsrv_vptofh(
    struct nfs_export *nx,
    struct nfs_filehandle *dnfhp,
    struct nfs_filehandle *nfhp)
{
    uint32_t maxfidsize;

    nfhp->nfh_fhp = (u_char *)&nfhp->nfh_xh;
    nfhp->nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
    nfhp->nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
    nfhp->nfh_xh.nxh_expid = htonl(nx->nx_id);
    nfhp->nfh_xh.nxh_flags = 0;
    nfhp->nfh_xh.nxh_reserved = 0;

    if (nfsvers == NFS_VER2) {
        bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE);
    }

    /* if directory FH matches export root, return invalid FH */
    if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) {
        if (nfsvers == NFS_VER2) {
            nfhp->nfh_len = NFSX_V2FH;
        } else {
            nfhp->nfh_len = sizeof(nfhp->nfh_xh);
        }
        nfhp->nfh_xh.nxh_fidlen = 0;
        nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH);
    }

    if (nfsvers == NFS_VER2) {
        maxfidsize = NFSV2_MAX_FID_SIZE;
    } else {
        maxfidsize = NFSV3_MAX_FID_SIZE;
    }
    nfhp->nfh_len = maxfidsize;
    error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx);
    if (nfhp->nfh_len > maxfidsize) {

    nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len;
    nfhp->nfh_len += sizeof(nfhp->nfh_xh);
    if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) {
        nfhp->nfh_len = NFSX_V2FH;
    }
/*
 * Compare two file handles to see if they're the same.
 * Note that we don't use nfh_len because that may include
 * padding in an NFSv2 file handle.
 */
nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2)
{
    struct nfs_exphandle *nxh1, *nxh2;

    nxh1 = (struct nfs_exphandle *)fh1->nfh_fhp;
    nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp;
    len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen;
    len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen;
    if (bcmp(nxh1, nxh2, len1)) {
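/*
 * Illustrative note (not in the original source): the server-side file
 * handles compared above consist of a struct nfs_exphandle header
 * followed by the file system's fid bytes, roughly:
 *
 *     [ nxh_version | nxh_fsid | nxh_expid | nxh_flags | nxh_reserved |
 *       nxh_fidlen | fid bytes ... ]
 *
 * so comparing sizeof(nfh_xh) + nxh_fidlen bytes covers exactly the
 * meaningful part of the handle, while nfh_len may additionally count
 * NFSv2 padding out to NFSX_V2FH.
 */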
/*
 * Functions for dealing with active user lists
 */

/*
 * Search the hash table for a user node with a matching IP address and uid field.
 * If found, the node's tm_last timestamp is updated and the node is returned.
 *
 * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned.
 * Returns NULL if a new node could not be allocated.
 *
 * The list's user_mutex lock MUST be held.
 */
struct nfs_user_stat_node *
nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *saddr, uid_t uid)
{
    struct nfs_user_stat_node *unode;
    struct nfs_user_stat_hashtbl_head *head;

    /* search the hash table */
    head = NFS_USER_STAT_HASH(list->user_hashtbl, uid);
    LIST_FOREACH(unode, head, hash_link) {
        if ((uid == unode->uid) && (nfs_sockaddr_cmp(saddr, (struct sockaddr *)&unode->sock) == 0)) {
            /* found matching node */
            break;
        }
    }

    /* found node in the hash table, now update lru position */
    TAILQ_REMOVE(&list->user_lru, unode, lru_link);
    TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);

    /* update time stamp */
    unode->tm_last = (uint32_t)now.tv_sec;

    if (list->node_count < nfsrv_user_stat_max_nodes) {
        /* Allocate a new node */
        MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node),
            M_TEMP, M_WAITOK | M_ZERO);
        /* increment node count */
        OSAddAtomic(1, &nfsrv_user_stat_node_count);
    } else {
        /* reuse the oldest node in the lru list */
        unode = TAILQ_FIRST(&list->user_lru);
        /* Remove the node */
        TAILQ_REMOVE(&list->user_lru, unode, lru_link);
        LIST_REMOVE(unode, hash_link);
    }

    /* Initialize the node */
    bcopy(saddr, &unode->sock, saddr->sa_len);
    unode->bytes_read = 0;
    unode->bytes_written = 0;
    unode->tm_start = (uint32_t)now.tv_sec;
    unode->tm_last = (uint32_t)now.tv_sec;

    /* insert the node */
    TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
    LIST_INSERT_HEAD(head, unode, hash_link);
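/*
 * Illustrative note (not in the original source): each export's active
 * user list keeps every nfs_user_stat_node on two structures at once, a
 * uid-keyed hash chain (used for the lookup above) and an LRU tailq (so
 * that when nfsrv_user_stat_max_nodes is reached the least recently used
 * node can be recycled, and so the reclaim pass below can visit the
 * oldest nodes first).  That is why the hash insert and the tailq insert,
 * and likewise both removals, always happen together.
 */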
nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes)
{
    struct nfs_user_stat_node *unode;
    struct nfs_active_user_list *ulist;
    struct sockaddr *saddr;

    if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) {

    saddr = (struct sockaddr *)mbuf_data(nd->nd_nam);

    /* check address family before going any further */
    if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) {

    ulist = &nx->nx_user_list;

    /* lock the active user list */
    lck_mtx_lock(&ulist->user_mutex);

    /* get the user node */
    unode = nfsrv_get_user_stat_node(ulist, saddr, uid);
    lck_mtx_unlock(&ulist->user_mutex);

    /* update counters */
    unode->bytes_read += rd_bytes;
    unode->bytes_written += wr_bytes;

    lck_mtx_unlock(&ulist->user_mutex);
/* initialize an active user list */
nfsrv_init_user_list(struct nfs_active_user_list *ulist)
{
    /* initialize the lru */
    TAILQ_INIT(&ulist->user_lru);

    /* initialize the hash table */
    for (i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) {
        LIST_INIT(&ulist->user_hashtbl[i]);
    }
    ulist->node_count = 0;

    lck_mtx_init(&ulist->user_mutex, nfsrv_active_user_mutex_group, LCK_ATTR_NULL);
/* Free all nodes in an active user list */
nfsrv_free_user_list(struct nfs_active_user_list *ulist)
{
    struct nfs_user_stat_node *unode;

    while ((unode = TAILQ_FIRST(&ulist->user_lru))) {
        /* Remove node and free */
        TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
        LIST_REMOVE(unode, hash_link);
        FREE(unode, M_TEMP);

        /* decrement node count */
        OSAddAtomic(-1, &nfsrv_user_stat_node_count);
    }
    ulist->node_count = 0;

    lck_mtx_destroy(&ulist->user_mutex, nfsrv_active_user_mutex_group);
/* Reclaim old expired user nodes from active user lists. */
nfsrv_active_user_list_reclaim(void)
{
    struct nfs_exportfs *nxfs;
    struct nfs_export *nx;
    struct nfs_active_user_list *ulist;
    struct nfs_user_stat_hashtbl_head oldlist;
    struct nfs_user_stat_node *unode, *unode_next;

    LIST_INIT(&oldlist);

    lck_rw_lock_shared(&nfsrv_export_rwlock);
    tstale = now.tv_sec - nfsrv_user_stat_max_idle_sec;
    LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
        LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
            /* Scan through all user nodes of this export */
            ulist = &nx->nx_user_list;
            lck_mtx_lock(&ulist->user_mutex);
            for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
                unode_next = TAILQ_NEXT(unode, lru_link);

                /* check if this node has expired */
                if (unode->tm_last >= tstale) {
                    break;
                }

                /* Remove node from the active user list */
                TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
                LIST_REMOVE(unode, hash_link);

                /* Add node to temp list */
                LIST_INSERT_HEAD(&oldlist, unode, hash_link);

                /* decrement node count */
                OSAddAtomic(-1, &nfsrv_user_stat_node_count);
                ulist->node_count--;
            }
            /* can unlock this export's list now */
            lck_mtx_unlock(&ulist->user_mutex);
        }
    }
    lck_rw_done(&nfsrv_export_rwlock);

    /* Free expired nodes */
    while ((unode = LIST_FIRST(&oldlist))) {
        LIST_REMOVE(unode, hash_link);
        FREE(unode, M_TEMP);
    }
/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static u_char nfsrv_v2errmap[] = {
    NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
    NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
    NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
    NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
};
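/*
 * Illustrative note (not in the original source): nfsrv_errmap() below
 * indexes this table with (err - 1), so an errno of ENOENT (2) maps to
 * nfsrv_v2errmap[1] == NFSERR_NOENT, and any errno whose slot holds
 * NFSERR_IO is reported to NFSv2 clients as a generic I/O error.
 */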
/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static short nfsv3err_null[] = {
static short nfsv3err_getattr[] = {
static short nfsv3err_setattr[] = {
static short nfsv3err_lookup[] = {
static short nfsv3err_access[] = {
static short nfsv3err_readlink[] = {
static short nfsv3err_read[] = {
static short nfsv3err_write[] = {
static short nfsv3err_create[] = {
static short nfsv3err_mkdir[] = {
static short nfsv3err_symlink[] = {
static short nfsv3err_mknod[] = {
static short nfsv3err_remove[] = {
static short nfsv3err_rmdir[] = {
static short nfsv3err_rename[] = {
static short nfsv3err_link[] = {
static short nfsv3err_readdir[] = {
static short nfsv3err_readdirplus[] = {
static short nfsv3err_fsstat[] = {
static short nfsv3err_fsinfo[] = {
static short nfsv3err_pathconf[] = {
static short nfsv3err_commit[] = {

static short *nfsrv_v3errmap[] = {
    nfsv3err_readdirplus,
/*
 * Map errnos to NFS error numbers. For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
    short *defaulterrp, *errp;

    if (nd->nd_vers == NFS_VER2) {
        if (err <= (int)sizeof(nfsrv_v2errmap)) {
            return (int)nfsrv_v2errmap[err - 1];
        }
    }
    if (nd->nd_procnum > NFSPROC_COMMIT) {
        return err & 0xffff;
    }
    errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
    } else if (*errp > err) {
    return (int)*defaulterrp;
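/*
 * Illustrative note (not in the original source): per the comment above
 * the per-procedure tables, each table lists the procedure's default
 * error first and its valid errors in increasing numeric order.  For
 * NFSv3 the table for nd->nd_procnum is scanned; if err appears it is
 * returned as-is, otherwise the procedure's default error is returned,
 * which is what the final "return (int)*defaulterrp;" implements.
 */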
#endif /* CONFIG_NFS_SERVER */

#endif /* CONFIG_NFS */