/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $
 */
#include <nfs/nfs_conf.h>

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/syscall.h>
#include <sys/ubc_internal.h>
#include <sys/fcntl.h>
#include <sys/domain.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>
#include <kern/task.h>
#include <sys/vmparam.h>
#include <sys/time.h>
#include <kern/clock.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#if CONFIG_NFS_CLIENT
#define _NFS_XDR_SUBS_FUNCS_ /* define this to get xdrbuf function definitions */
#endif
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>
#include <miscfs/specfs/specdev.h>
#include <netinet/in.h>
#include <net/kpi_interface.h>
#include <sys/utfconv.h>
struct nfsstats __attribute__((aligned(8))) nfsstats;
size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0;
/*
 * functions to convert between NFS and VFS types
 */
vtonfs_type(enum vtype vtype, int nfsvers)
    if (nfsvers > NFS_VER2) {
    if (nfsvers > NFS_VER2) {

nfstov_type(nfstype nvtype, int nfsvers)
    if (nfsvers > NFS_VER2) {
    if (nfsvers > NFS_VER2) {
    if (nfsvers > NFS_VER3) {
    if (nfsvers > NFS_VER3) {

vtonfsv2_mode(enum vtype vtype, mode_t m)
    return vnode_makeimode(vtype, m);
    return vnode_makeimode(VCHR, m);
    return vnode_makeimode(VNON, m);
#if CONFIG_NFS_SERVER
/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
int nfsv3_procid[NFS_NPROCS] = {
#endif /* CONFIG_NFS_SERVER */

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {

/*
 * initialize NFS's cache of mbuf constants
 */
    nfs_mbuf_mhlen = ms.mhlen;
    nfs_mbuf_minclsize = ms.minclsize;
#if CONFIG_NFS_SERVER

/*
 * allocate a list of mbufs to hold the given amount of data
 */
nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt)
    mbuf_t mhead, mlast, m;

    mhead = mlast = NULL;
        nfsm_mbuf_getcluster(error, &m, (size - len));
        if (mlast && ((error = mbuf_setnext(mlast, m)))) {
        mlen = mbuf_maxlen(m);
        if ((len + mlen) > size) {
        mbuf_setlen(m, mlen);

#endif /* CONFIG_NFS_SERVER */
/*
 * nfsm_chain_new_mbuf()
 *
 * Add a new mbuf to the given chain.
 */
nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint)
    if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) {
        sizehint = nfs_mbuf_minclsize;

    /* allocate a new mbuf */
    nfsm_mbuf_getcluster(error, &mb, sizehint);
        panic("got NULL mbuf?");

    /* do we have a current mbuf? */
        /* first cap off current mbuf */
        mbuf_setlen(nmc->nmc_mcur, nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur));
        /* then append the new mbuf */
        error = mbuf_setnext(nmc->nmc_mcur, mb);

    /* set up for using the new mbuf */
    nmc->nmc_ptr = mbuf_data(mb);
    nmc->nmc_left = mbuf_trailingspace(mb);
/*
 * nfsm_chain_add_opaque_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, size_t len)
    size_t paddedlen, tlen;

    paddedlen = nfsm_rndup(len);
        if (!nmc->nmc_left) {
            error = nfsm_chain_new_mbuf(nmc, paddedlen);
        tlen = MIN(nmc->nmc_left, paddedlen);
            bcopy(buf, nmc->nmc_ptr, tlen);
            bzero(nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
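/*
 * Worked example (editorial note, not from the original source): XDR opaques
 * are padded to a 4-byte boundary, so adding a 5-byte opaque consumes
 * nfsm_rndup(5) == 8 bytes of chain space -- the 5 data bytes copied by the
 * bcopy() above plus 3 zero pad bytes written by the bzero() pass once the
 * source buffer is exhausted.
 */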
/*
 * nfsm_chain_add_opaque_nopad_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, size_t len)
        if (nmc->nmc_left <= 0) {
            error = nfsm_chain_new_mbuf(nmc, len);
        tlen = MIN(nmc->nmc_left, len);
        bcopy(buf, nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
/*
 * nfsm_chain_add_uio()
 *
 * Add "len" bytes of data from "uio" to the given chain.
 */
nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, size_t len)
    size_t paddedlen, tlen;

    paddedlen = nfsm_rndup(len);
        if (!nmc->nmc_left) {
            error = nfsm_chain_new_mbuf(nmc, paddedlen);
        tlen = MIN(nmc->nmc_left, paddedlen);
            tlen = MIN(INT32_MAX, MIN(tlen, len));
            uiomove(nmc->nmc_ptr, (int)tlen, uio);
            bzero(nmc->nmc_ptr, tlen);
        nmc->nmc_ptr += tlen;
        nmc->nmc_left -= tlen;
/*
 * Find the length of the NFS mbuf chain
 * up to the current encoding/decoding offset.
 */
nfsm_chain_offset(struct nfsm_chain *nmc)
    for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) {
        if (mb == nmc->nmc_mcur) {
            return len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb));
/*
 * nfsm_chain_advance()
 *
 * Advance an nfsm_chain by "len" bytes.
 */
nfsm_chain_advance(struct nfsm_chain *nmc, size_t len)
        if (nmc->nmc_left >= len) {
            nmc->nmc_left -= len;
        len -= nmc->nmc_left;
        nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur);
        nmc->nmc_ptr = mbuf_data(mb);
        nmc->nmc_left = mbuf_len(mb);
/*
 * nfsm_chain_reverse()
 *
 * Reverse decode offset in an nfsm_chain by "len" bytes.
 */
nfsm_chain_reverse(struct nfsm_chain *nmc, size_t len)
    size_t mlen, new_offset;

    mlen = nmc->nmc_ptr - (caddr_t) mbuf_data(nmc->nmc_mcur);
        nmc->nmc_left += len;

    new_offset = nfsm_chain_offset(nmc) - len;
    nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead);

    return nfsm_chain_advance(nmc, new_offset);
/*
 * nfsm_chain_get_opaque_pointer_f()
 *
 * Return a pointer to the next "len" bytes of contiguous data in
 * the mbuf chain. If the next "len" bytes are not contiguous, we
 * try to manipulate the mbuf chain so that it is.
 *
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr)
    size_t mblen, cplen, need, left;

    /* move to next mbuf with data */
    while (nmc->nmc_mcur && (nmc->nmc_left == 0)) {
        mb = mbuf_next(nmc->nmc_mcur);
        nmc->nmc_ptr = mbuf_data(mb);
        nmc->nmc_left = mbuf_len(mb);
    /* check if we've run out of data */
    if (!nmc->nmc_mcur) {

    /* do we already have a contiguous buffer? */
    if (nmc->nmc_left >= len) {
        /* the returned pointer will be the current pointer */
        *pptr = (u_char *)nmc->nmc_ptr;
        error = nfsm_chain_advance(nmc, nfsm_rndup(len));

    padlen = nfsm_rndup(len) - len;

    /* we need (len - left) more bytes */
    mbcur = nmc->nmc_mcur;
    left = nmc->nmc_left;

    if (need > mbuf_trailingspace(mbcur)) {
        /*
         * The needed bytes won't fit in the current mbuf so we'll
         * allocate a new mbuf to hold the contiguous range of data.
         */
        nfsm_mbuf_getcluster(error, &mb, len);

        /* double check that this mbuf can hold all the data */
        if (mbuf_maxlen(mb) < len) {

        /* the returned pointer will be the new mbuf's data pointer */
        *pptr = ptr = mbuf_data(mb);

        /* copy "left" bytes to the new mbuf */
        bcopy(nmc->nmc_ptr, ptr, left);
        mbuf_setlen(mb, left);

        /* insert the new mbuf between the current and next mbufs */
        error = mbuf_setnext(mb, mbuf_next(mbcur));
            error = mbuf_setnext(mbcur, mb);

        /* reduce current mbuf's length by "left" */
        mbuf_setlen(mbcur, mbuf_len(mbcur) - left);

        /*
         * update nmc's state to point at the end of the mbuf
         * where the needed data will be copied to.
         */
        nmc->nmc_mcur = mbcur = mb;
        nmc->nmc_ptr = (caddr_t)ptr;

        /* The rest of the data will fit in this mbuf. */

        /* the returned pointer will be the current pointer */
        *pptr = (u_char *)nmc->nmc_ptr;

        /*
         * update nmc's state to point at the end of the mbuf
         * where the needed data will be copied to.
         */
        nmc->nmc_ptr += left;

    /*
     * move the next "need" bytes into the current
     * mbuf from the mbufs that follow
     */

    /* extend current mbuf length */
    mbuf_setlen(mbcur, mbuf_len(mbcur) + need);

    /* mb follows mbufs we're copying/compacting data from */
    mb = mbuf_next(mbcur);

    /* copy as much as we need/can */
        mblen = mbuf_len(mb);
        cplen = MIN(mblen, need);
        bcopy(ptr, nmc->nmc_ptr, cplen);
        /*
         * update the mbuf's pointer and length to reflect that
         * the data was shifted to an earlier mbuf in the chain
         */
        error = mbuf_setdata(mb, ptr + cplen, mblen - cplen);
            mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

        /* update pointer/need */
        nmc->nmc_ptr += cplen;

        /* if more needed, go to next mbuf */

    /* did we run out of data in the mbuf chain? */
        mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

    /*
     * update nmc's state to point after this contiguous data
     *
     * "mb" points to the last mbuf we copied data from so we
     * just set nmc to point at whatever remains in that mbuf.
     */
    nmc->nmc_ptr = mbuf_data(mb);
    nmc->nmc_left = mbuf_len(mb);

    /* move past any padding */
    error = nfsm_chain_advance(nmc, padlen);
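/*
 * Illustrative note (editorial, not from the original source): if a caller
 * asks for a 16-byte contiguous pointer while the current mbuf holds only 6
 * of those bytes, the code above either grows the current mbuf in place
 * (when its trailing space can take the missing 10 bytes) or allocates a
 * fresh cluster, copies the 6 resident bytes into it, and then compacts the
 * remaining 10 bytes forward out of the following mbufs.
 */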
/*
 * nfsm_chain_get_opaque_f()
 *
 * Read the next "len" bytes in the chain into "buf".
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, size_t len, u_char *buf)
    size_t cplen, padlen;

    padlen = nfsm_rndup(len) - len;

    /* loop through mbufs copying all the data we need */
    while (len && nmc->nmc_mcur) {
        /* copy as much as we need/can */
        cplen = MIN(nmc->nmc_left, len);
            bcopy(nmc->nmc_ptr, buf, cplen);
            nmc->nmc_ptr += cplen;
            nmc->nmc_left -= cplen;
        /* if more needed, go to next mbuf */
            mbuf_t mb = mbuf_next(nmc->nmc_mcur);
            nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
            nmc->nmc_left = mb ? mbuf_len(mb) : 0;

    /* did we run out of data in the mbuf chain? */

    nfsm_chain_adv(error, nmc, padlen);
/*
 * nfsm_chain_get_uio()
 *
 * Read the next "len" bytes in the chain into the given uio.
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_uio(struct nfsm_chain *nmc, size_t len, uio_t uio)
    size_t cplen, padlen;

    padlen = nfsm_rndup(len) - len;

    /* loop through mbufs copying all the data we need */
    while (len && nmc->nmc_mcur) {
        /* copy as much as we need/can */
        cplen = MIN(nmc->nmc_left, len);
            cplen = MIN(cplen, INT32_MAX);
            error = uiomove(nmc->nmc_ptr, (int)cplen, uio);
            nmc->nmc_ptr += cplen;
            nmc->nmc_left -= cplen;
        /* if more needed, go to next mbuf */
            mbuf_t mb = mbuf_next(nmc->nmc_mcur);
            nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
            nmc->nmc_left = mb ? mbuf_len(mb) : 0;

    /* did we run out of data in the mbuf chain? */

    nfsm_chain_adv(error, nmc, padlen);
#if CONFIG_NFS_CLIENT

nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, size_t slen)
    uint8_t smallbuf[64];
    uint8_t *nfcname = smallbuf;
    size_t buflen = sizeof(smallbuf), nfclen;

    error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);
    if (error == ENAMETOOLONG) {
        nfcname = zalloc(ZV_NAMEI);
        error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM);

    /* if we got an error, just use the original string */
        nfsm_chain_add_string(error, nmc, s, slen);
        nfsm_chain_add_string(error, nmc, nfcname, nfclen);

    if (nfcname && (nfcname != smallbuf)) {
        NFS_ZFREE(ZV_NAMEI, nfcname);
/*
 * Add an NFSv2 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_t szrdev)
    nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type,
        (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600)));
    nfsm_chain_add_32(error, nmc,
        VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1);
    nfsm_chain_add_32(error, nmc,
        VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1);
    nfsm_chain_add_32(error, nmc, szrdev);
    nfsm_chain_add_v2time(error, nmc,
        VATTR_IS_ACTIVE(vap, va_access_time) ?
        &vap->va_access_time : NULL);
    nfsm_chain_add_v2time(error, nmc,
        VATTR_IS_ACTIVE(vap, va_modify_time) ?
        &vap->va_modify_time : NULL);
/*
 * Add an NFSv3 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v3sattr_f(
    __unused struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct vnode_attr *vap)
    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_mode);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_uid)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_uid);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_gid)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_32(error, nmc, vap->va_gid);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (VATTR_IS_ACTIVE(vap, va_data_size)) {
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_64(error, nmc, vap->va_data_size);
        nfsm_chain_add_32(error, nmc, FALSE);
    if (vap->va_vaflags & VA_UTIMES_NULL) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
    if (VATTR_IS_ACTIVE(vap, va_access_time)) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
        nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_sec);
        nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec);
        nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
    if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
        nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
        nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_sec);
        nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec);
        nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
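/*
 * Encoding sketch (editorial note, derived from the code above): each
 * optional sattr3 field is a discriminated union on the wire -- a TRUE word
 * followed by the value when the attribute is being set, or a single FALSE
 * word when it is not.  The timestamps instead use the three-way
 * NFS_TIME_SET_TO_SERVER / NFS_TIME_SET_TO_CLIENT / NFS_TIME_DONT_CHANGE
 * discriminant.
 */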
/*
 * nfsm_chain_get_fh_attr()
 *
 * Get the file handle and attributes from an mbuf chain. (NFSv2/v3)
 */
nfsm_chain_get_fh_attr(
    struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct nfs_vattr *nvap)
    int error = 0, gotfh, gotattr;

    if (nfsvers == NFS_VER3) { /* check for file handle */
        nfsm_chain_get_32(error, nmc, gotfh);
    if (!error && gotfh) { /* get file handle */
        nfsm_chain_get_fh(error, nmc, nfsvers, fhp);
    if (nfsvers == NFS_VER3) { /* check for file attributes */
        nfsm_chain_get_32(error, nmc, gotattr);
        if (!gotfh) { /* skip attributes */
            nfsm_chain_adv(error, nmc, NFSX_V3FATTR);
        } else { /* get attributes */
            error = nfs_parsefattr(nmp, nmc, nfsvers, nvap);
        /* we need valid attributes in order to call nfs_nget() */
        if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp)) {
/*
 * Get and process NFSv3 WCC data from an mbuf chain
 */
nfsm_chain_get_wcc_data_f(
    struct nfsm_chain *nmc,
    struct timespec *premtime,
    nfsm_chain_get_32(error, nmc, flag);
    if (!error && flag) {
        nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
        nfsm_chain_get_32(error, nmc, premtime->tv_sec);
        nfsm_chain_get_32(error, nmc, premtime->tv_nsec);
        nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
        premtime->tv_sec = 0;
        premtime->tv_nsec = 0;
    nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp);
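/*
 * Editorial note: NFSv3 wcc_data carries optional pre-op attributes (size,
 * mtime, ctime) ahead of the post-op attributes.  The code above keeps only
 * the pre-op mtime; the two nfsm_chain_adv(..., 2 * NFSX_UNSIGNED) calls skip
 * the 8-byte size before it and the ctime words after it.
 */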
/*
 * Get the next RPC transaction ID (XID)
 */
nfs_get_xid(uint64_t *xidp)
    lck_mtx_lock(nfs_request_mutex);
        /*
         * Derive initial xid from system time.
         *
         * Note: it's OK if this code inits nfs_xid to 0 (for example,
         * due to a broken clock) because we immediately increment it
         * and we guarantee to never use xid 0. So, nfs_xid should only
         * ever be 0 the first time this function is called.
         */
        nfs_xid = tv.tv_sec << 12;
    if (++nfs_xid == 0) {
        /* Skip zero xid if it should ever happen. */
    *xidp = nfs_xid + (nfs_xidwrap << 32);
    lck_mtx_unlock(nfs_request_mutex);
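/*
 * Worked example (editorial note): the value returned through *xidp packs the
 * wrap counter into the upper 32 bits, so with nfs_xid == 0x12345678 and
 * nfs_xidwrap == 2 the caller sees 0x0000000212345678, while only the low
 * 32 bits are placed in the RPC header on the wire.
 */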
/*
 * Build the RPC header and fill in the authorization info.
 * Returns the head of the mbuf list and the xid.
 */
    struct nfsmount *nmp = req->r_nmp;
    int nfsvers = nmp->nm_vers;
    int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum);

    return nfsm_rpchead2(nmp, nmp->nm_sotype, NFS_PROG, nfsvers, proc,
        req->r_auth, req->r_cred, req, mrest, xidp, mreqp);
/*
 * get_auxiliary_groups: Gets the supplementary groups from a credential.
 *
 * IN:  cred:   credential to get the associated groups from.
 * OUT: groups: An array of gids of NGROUPS size.
 * IN:  count:  The number of groups to get; i.e., the number of groups the server supports
 *
 * returns: The number of groups found.
 *
 * Just a wrapper around kauth_cred_getgroups to handle the case of a server
 * supporting less than NGROUPS groups.
 */
get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], size_t count)
    size_t maxcount = count < NGROUPS ? count + 1 : NGROUPS;

    for (i = 0; i < NGROUPS; i++) {
        groups[i] = -2; /* Initialize to the nobody group */
    (void)kauth_cred_getgroups(cred, groups, &maxcount);

    /*
     * kauth_get_groups returns the primary group followed by the
     * user's auxiliary groups. If the number of groups the server supports
     * is less than NGROUPS, then we will drop the first group so that
     * we can send one more group over the wire.
     */
    if (count < NGROUPS) {
        pgid = kauth_cred_getgid(cred);
        if (pgid == groups[0]) {
            for (i = 0; i < maxcount; i++) {
                groups[i] = groups[i + 1];
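/*
 * Illustrative example (editorial, not from the original source): with
 * NGROUPS of, say, 16 and a server that only accepts count == 8 groups,
 * maxcount becomes 9, so when the primary gid duplicates groups[0] it can be
 * dropped and the remaining entries shifted down, leaving room to send one
 * additional distinct supplementary group.
 */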
nfsm_rpchead2(__unused struct nfsmount *nmp, int sotype, int prog, int vers, int proc, int auth_type,
    kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp)
    int error, auth_len = 0, authsiz, reqlen;
    struct nfsm_chain nmreq;
    gid_t grouplist[NGROUPS];
    size_t groupcount = 0;

    /* calculate expected auth length */
    switch (auth_type) {
        size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;
        groupcount = get_auxiliary_groups(cred, grouplist, count);
        auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
        if (!req || !cred) {
        auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now
#endif /* CONFIG_NFS_GSS */
    authsiz = nfsm_rndup(auth_len);

    /* allocate the packet */
    headlen = authsiz + 10 * NFSX_UNSIGNED;
    if (sotype == SOCK_STREAM) { /* also include room for any RPC Record Mark */
        headlen += NFSX_UNSIGNED;
    if (headlen >= nfs_mbuf_minclsize) {
        error = mbuf_getpacket(MBUF_WAITOK, &mreq);
        error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
        if (headlen < nfs_mbuf_mhlen) {
            mbuf_align_32(mreq, headlen);
            mbuf_align_32(mreq, 8 * NFSX_UNSIGNED);
        /* unable to allocate packet */
        /* XXX should we keep statistics for these errors? */

    /*
     * If the caller gave us a non-zero XID then use it because
     * it may be a higher-level resend with a GSSAPI credential.
     * Otherwise, allocate a new one.
     */

    /* build the header(s) */
    nfsm_chain_init(&nmreq, mreq);

    /* First, if it's a TCP stream insert space for an RPC record mark */
    if (sotype == SOCK_STREAM) {
        nfsm_chain_add_32(error, &nmreq, 0);

    /* Then the RPC header. */
    nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff));
    nfsm_chain_add_32(error, &nmreq, RPC_CALL);
    nfsm_chain_add_32(error, &nmreq, RPC_VER2);
    nfsm_chain_add_32(error, &nmreq, prog);
    nfsm_chain_add_32(error, &nmreq, vers);
    nfsm_chain_add_32(error, &nmreq, proc);

    switch (auth_type) {
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* auth */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_build_done(error, &nmreq);
        /* Append the args mbufs */
            error = mbuf_setnext(nmreq.nmc_mcur, mrest);

        nfsm_chain_add_32(error, &nmreq, RPCAUTH_SYS);
        nfsm_chain_add_32(error, &nmreq, authsiz);
        nfsm_chain_add_32(error, &nmreq, 0);    /* stamp */
        nfsm_chain_add_32(error, &nmreq, 0);    /* zero-length hostname */
        nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred));      /* UID */
        nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred));      /* GID */
        nfsm_chain_add_32(error, &nmreq, groupcount);/* additional GIDs */
        for (i = 0; i < groupcount; i++) {
            nfsm_chain_add_32(error, &nmreq, grouplist[i]);

        /* And the verifier... */
        nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */
        nfsm_chain_add_32(error, &nmreq, 0);            /* length */
        nfsm_chain_build_done(error, &nmreq);

        /* Append the args mbufs */
            error = mbuf_setnext(nmreq.nmc_mcur, mrest);

        error = nfs_gss_clnt_cred_put(req, &nmreq, mrest);
        if (error == ENEEDAUTH) {
            size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS;
            /*
             * Use sec=sys for this user
             */
            req->r_auth = auth_type = RPCAUTH_SYS;
            groupcount = get_auxiliary_groups(cred, grouplist, count);
            auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED;
            authsiz = nfsm_rndup(auth_len);
#endif /* CONFIG_NFS_GSS */

    /* finish setting up the packet */
    error = mbuf_pkthdr_setrcvif(mreq, 0);

    /* Calculate the size of the request */
    for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) {
        reqlen += mbuf_len(mb);
    mbuf_pkthdr_setlen(mreq, reqlen);

    /*
     * If the request goes on a TCP stream,
     * set its size in the RPC record mark.
     * The record mark count doesn't include itself
     * and the last fragment bit is set.
     */
    if (sotype == SOCK_STREAM) {
        nfsm_chain_set_recmark(error, &nmreq,
            (reqlen - NFSX_UNSIGNED) | 0x80000000);
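/*
 * Worked example (editorial note): for a 132-byte request on a TCP mount the
 * record mark set above is (132 - NFSX_UNSIGNED) | 0x80000000, i.e. a
 * fragment length of 128 with the last-fragment bit set; the 4-byte mark
 * itself is not counted in that length.
 */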
/*
 * Parse an NFS file attribute structure out of an mbuf chain.
 */
    __unused struct nfsmount *nmp,
    struct nfsm_chain *nmc,
    struct nfs_vattr *nvap)
    uint32_t vmode, val, val2;

    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TYPE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_MODE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SIZE);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_SPACE_USED);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_RAWDEV);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FSID);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_FILEID);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_ACCESS);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY);
    NFS_BITMAP_SET(nvap->nva_bitmap, NFS_FATTR_TIME_METADATA);

    nfsm_chain_get_32(error, nmc, nvtype);
    nfsm_chain_get_32(error, nmc, vmode);

    if (nfsvers == NFS_VER3) {
        nvap->nva_type = vtype = nfstov_type(nvtype, nfsvers);
        /*
         * The duplicate information returned in fa_type and fa_mode
         * is an ambiguity in the NFS version 2 protocol.
         *
         * VREG should be taken literally as a regular file.  If a
         * server intends to return some type information differently
         * in the upper bits of the mode field (e.g. for sockets, or
         * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
         * leave the examination of the mode bits even in the VREG
         * case to avoid breakage for bogus servers, but we make sure
         * that there are actually type bits set in the upper part of
         * fa_mode (and failing that, trust the va_type field).
         *
         * NFSv3 cleared the issue, and requires fa_mode to not
         * contain any type information (while also introducing
         * sockets and FIFOs for fa_type).
         */
        vtype = nfstov_type(nvtype, nfsvers);
        if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) {
            vtype = IFTOVT(vmode);
        nvap->nva_type = vtype;

    nvap->nva_mode = (vmode & 07777);

    nfsm_chain_get_32(error, nmc, nvap->nva_nlink);
    nfsm_chain_get_32(error, nmc, nvap->nva_uid);
    nfsm_chain_get_32(error, nmc, nvap->nva_gid);

    if (nfsvers == NFS_VER3) {
        nfsm_chain_get_64(error, nmc, nvap->nva_size);
        nfsm_chain_get_64(error, nmc, nvap->nva_bytes);
        nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata1);
        nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata2);
        nfsm_chain_get_64(error, nmc, nvap->nva_fsid.major);
        nvap->nva_fsid.minor = 0;
        nfsm_chain_get_64(error, nmc, nvap->nva_fileid);
        nfsm_chain_get_32(error, nmc, nvap->nva_size);
        nfsm_chain_adv(error, nmc, NFSX_UNSIGNED);
        nfsm_chain_get_32(error, nmc, rdev);
        nvap->nva_rawdev.specdata1 = major(rdev);
        nvap->nva_rawdev.specdata2 = minor(rdev);
        nfsm_chain_get_32(error, nmc, val); /* blocks */
        nvap->nva_bytes = val * NFS_FABLKSIZE;
        nfsm_chain_get_32(error, nmc, val);
        nvap->nva_fsid.major = (uint64_t)val;
        nvap->nva_fsid.minor = 0;
        nfsm_chain_get_32(error, nmc, val);
        nvap->nva_fileid = (uint64_t)val;
        /* Really ugly NFSv2 kludge. */
        if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) {
            nvap->nva_type = VFIFO;

    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_ACCESS],
        nvap->nva_timensec[NFSTIME_ACCESS]);
    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_MODIFY],
        nvap->nva_timensec[NFSTIME_MODIFY]);
    nfsm_chain_get_time(error, nmc, nfsvers,
        nvap->nva_timesec[NFSTIME_CHANGE],
        nvap->nva_timensec[NFSTIME_CHANGE]);
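/*
 * Editorial note on the NFSv2 kludge above: NFSv2 has no FIFO file type, so
 * servers conventionally report a FIFO as a character device whose rdev is
 * the all-ones value; when the parsed type is VCHR and rdev == 0xffffffff
 * the node is therefore remapped to VFIFO.
 */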
/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the value pointed to by nvap, unless the file type in the attribute
 * cache doesn't match the file type in the nvap, in which case log a
 * warning and return ESTALE.
 *
 * If the dontshrink flag is set, then it's not safe to call ubc_setsize()
 * to shrink the size of the file.
 */
    struct nfs_vattr *nvap,
    struct nfs_vattr *npnvap;
    int xattr = np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR;
    int referral = np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL;
    int aclbit, monitored, error = 0;
    struct nfsmount *nmp;
    uint32_t events = np->n_events;

    if (np->n_hflag & NHINIT) {
        mp = vnode_mount(vp);
    monitored = vp ? vnode_ismonitored(vp) : 0;

    FSDBG_TOP(527, np, vp, *xidp >> 32, *xidp);

    if (!((nmp = VFSTONFS(mp)))) {
        FSDBG_BOT(527, ENXIO, 1, 0, *xidp);

    if (*xidp < np->n_xid) {
        /*
         * We have already updated attributes with a response from
         * a later request. The attributes we have here are probably
         * stale so we drop them (just return). However, our
         * out-of-order receipt could be correct - if the requests were
         * processed out of order at the server.  Given the uncertainty
         * we invalidate our cached attributes.  *xidp is zeroed here
         * to indicate the attributes were dropped - only getattr
         * cares - it needs to retry the rpc.
         */
        NATTRINVALIDATE(np);
        FSDBG_BOT(527, 0, np, np->n_xid, *xidp);

    if (vp && (nvap->nva_type != vnode_vtype(vp))) {
        /*
         * The filehandle has changed type on us. This can be
         * caused by either the server not having unique filehandles
         * or because another client has removed the previous
         * filehandle and a new object (of a different type)
         * has been created with the same filehandle.
         *
         * We can't simply switch the type on the vnode because
         * there may be type-specific fields that need to be
         * cleaned up or set up.
         *
         * So, what should we do with this vnode?
         *
         * About the best we can do is log a warning and return
         * an error. ESTALE is about the closest error, but it
         * is a little strange that we come up with this error
         * internally instead of simply passing it through from
         * the server. Hopefully, the vnode will be reclaimed
         * soon so the filehandle can be reincarnated as the new
         */
        printf("nfs loadattrcache vnode changed type, was %d now %d\n",
            vnode_vtype(vp), nvap->nva_type);
        events |= VNODE_EVENT_DELETE;

    npnvap = &np->n_vattr;

    /*
     * The ACL cache needs special handling because it is not
     * always updated.  Save current ACL cache state so it can
     * be restored after copying the new attributes into place.
     */
    aclbit = NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL);
    acl = npnvap->nva_acl;
    /*
     * For monitored nodes, check for attribute changes that should generate events.
     */
    if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS) &&
        (nvap->nva_nlink != npnvap->nva_nlink)) {
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_LINK;
    if (events & VNODE_EVENT_PERMS) {
        /* no need to do all the checking if it's already set */;
    } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) &&
        (nvap->nva_mode != npnvap->nva_mode)) {
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
    } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
        (nvap->nva_uid != npnvap->nva_uid)) {
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
    } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
        (nvap->nva_gid != npnvap->nva_gid)) {
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
    } else if (nmp->nm_vers >= NFS_VER4) {
        if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) &&
            !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) &&
            !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        } else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) &&
            nvap->nva_acl && npnvap->nva_acl &&
            ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) ||
            bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) {
            events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS;
        ((nmp->nm_vers >= NFS_VER4) && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_CHANGE) && (nvap->nva_change != npnvap->nva_change)) ||
        (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) &&
        ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) ||
        (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) {
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_RAWDEV) &&
        ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) ||
        (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) {
        events |= VNODE_EVENT_ATTRIB;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_FILEID) &&
        (nvap->nva_fileid != npnvap->nva_fileid)) {
        events |= VNODE_EVENT_ATTRIB;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ARCHIVE) &&
        ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) {
        events |= VNODE_EVENT_ATTRIB;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_HIDDEN) &&
        ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) {
        events |= VNODE_EVENT_ATTRIB;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_CREATE) &&
        ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) ||
        (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) {
        events |= VNODE_EVENT_ATTRIB;
    if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) &&
        ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) ||
        (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) {
        events |= VNODE_EVENT_ATTRIB;
    /* Copy the attributes to the attribute cache */
    if (nmp->nm_vers >= NFS_VER4 && npnvap->nva_flags & NFS_FFLAG_PARTIAL_WRITE) {
        /*
         * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested.
         * In such cases, we do not update the time stamp - but the requested attributes.
         */
        NFS_BITMAP_COPY_ATTR(nvap, npnvap, TYPE, type);
        NFS_BITMAP_COPY_ATTR(nvap, npnvap, CHANGE, change);
        NFS_BITMAP_COPY_ATTR(nvap, npnvap, SIZE, size);
        NFS_BITMAP_COPY_TIME(nvap, npnvap, METADATA, CHANGE);
        NFS_BITMAP_COPY_TIME(nvap, npnvap, MODIFY, MODIFY);
#endif /* CONFIG_NFS4 */
        bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap));
        np->n_attrstamp = now.tv_sec;

    /* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... */
        nvap->nva_flags |= xattr;
    if (vp && referral) {
        nvap->nva_flags |= referral;

    if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
        /* we're updating the ACL */
        if (nvap->nva_acl) {
            /* make a copy of the acl for the cache */
            npnvap->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
            if (npnvap->nva_acl) {
                bcopy(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
                /* can't make a copy to cache, invalidate ACL cache */
                NFS_BITMAP_CLR(npnvap->nva_bitmap, NFS_FATTR_ACL);
            kauth_acl_free(acl);
    if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) {
        /* update the ACL timestamp */
        np->n_aclstamp = now.tv_sec;
        /* we aren't updating the ACL, so restore original values */
            NFS_BITMAP_SET(npnvap->nva_bitmap, NFS_FATTR_ACL);
        npnvap->nva_acl = acl;

    /*
     * For NFSv4, if the fsid doesn't match the fsid for the mount, then
     * this node is for a different file system on the server.  So we mark
     * this node as a trigger node that will trigger the mirror mount.
     */
    if ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_type == VDIR) &&
        ((np->n_vattr.nva_fsid.major != nmp->nm_fsid.major) ||
        (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) {
        np->n_vattr.nva_flags |= NFS_FFLAG_TRIGGER;
#endif /* CONFIG_NFS4 */
#endif /* CONFIG_TRIGGERS */

    if (!vp || (nvap->nva_type != VREG)) {
        np->n_size = nvap->nva_size;
    } else if (nvap->nva_size != np->n_size) {
        FSDBG(527, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
        if (!UBCINFOEXISTS(vp) || (dontshrink && (nvap->nva_size < np->n_size))) {
            /* asked not to shrink, so stick with current size */
            FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
            nvap->nva_size = np->n_size;
            NATTRINVALIDATE(np);
        } else if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
            /* if we've modified, stick with larger size */
            FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
            nvap->nva_size = np->n_size;
            npnvap->nva_size = np->n_size;
            /*
             * n_size is protected by the data lock, so we need to
             * defer updating it until it's safe.  We save the new size
             * and set a flag and it'll get updated the next time we get/drop
             * the data lock or the next time we do a getattr.
             */
            np->n_newsize = nvap->nva_size;
            SET(np->n_flag, NUPDATESIZE);
        events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_EXTEND;

    if (np->n_flag & NCHG) {
        if (np->n_flag & NACC) {
            nvap->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
            nvap->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
        if (np->n_flag & NUPD) {
            nvap->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
            nvap->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;

    if (monitored && events) {
        nfs_vnode_notify(np, events);

    FSDBG_BOT(527, error, np, np->n_size, *xidp);
/*
 * Calculate the attribute timeout based on
 * how recently the file has been modified.
 */
nfs_attrcachetimeout(nfsnode_t np)
    struct nfsmount *nmp;

    if (nfs_mount_gone(nmp)) {
    isdir = vnode_isdir(NFSTOV(np));

    if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) {
        /* If we have a delegation, we always use the max timeout. */
        timeo = isdir ? nmp->nm_acdirmax : nmp->nm_acregmax;
    if ((np)->n_flag & NMODIFIED) {
        /* If we have modifications, we always use the min timeout. */
        timeo = isdir ? nmp->nm_acdirmin : nmp->nm_acregmin;
        /* Otherwise, we base the timeout on how old the file seems. */
        /* Note that if the client and server clocks are way out of sync, */
        /* timeout will probably get clamped to a min or max value */
        timeo = (now.tv_sec - (np)->n_vattr.nva_timesec[NFSTIME_MODIFY]) / 10;
        if (timeo < nmp->nm_acdirmin) {
            timeo = nmp->nm_acdirmin;
        } else if (timeo > nmp->nm_acdirmax) {
            timeo = nmp->nm_acdirmax;
        if (timeo < nmp->nm_acregmin) {
            timeo = nmp->nm_acregmin;
        } else if (timeo > nmp->nm_acregmax) {
            timeo = nmp->nm_acregmax;
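/*
 * Worked example (editorial note): for a regular file with no delegation and
 * no local modifications that was last modified 300 seconds ago, the base
 * timeout is 300 / 10 == 30 seconds, which is then clamped to the
 * [acregmin, acregmax] range configured on the mount (acdirmin/acdirmax for
 * directories).
 */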
/*
 * Check the attribute cache time stamp.
 * If the cache is valid, copy contents to *nvaper and return 0
 * otherwise return an error.
 * Must be called with the node locked.
 */
nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags)
    struct nfs_vattr *nvap;
    struct timeval nowup;
    struct nfsmount *nmp;

    /* Check if the attributes are valid. */
    if (!NATTRVALID(np) || ((flags & NGA_ACL) && !NACLVALID(np))) {
        FSDBG(528, np, 0, 0xffffff01, ENOENT);
        OSAddAtomic64(1, &nfsstats.attrcache_misses);
    if (nfs_mount_gone(nmp)) {

    /*
     * Verify the cached attributes haven't timed out.
     * If the server isn't responding, skip the check
     * and return cached attributes.
     */
    if (!nfs_use_cache(nmp)) {
        microuptime(&nowup);
        if (np->n_attrstamp > nowup.tv_sec) {
            printf("NFS: Attribute time stamp is in the future by %ld seconds. Invalidating cache\n",
                np->n_attrstamp - nowup.tv_sec);
            NATTRINVALIDATE(np);
            NACCESSINVALIDATE(np);
        timeo = nfs_attrcachetimeout(np);
        if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
            FSDBG(528, np, 0, 0xffffff02, ENOENT);
            OSAddAtomic64(1, &nfsstats.attrcache_misses);
        if ((flags & NGA_ACL) && ((nowup.tv_sec - np->n_aclstamp) >= timeo)) {
            FSDBG(528, np, 0, 0xffffff02, ENOENT);
            OSAddAtomic64(1, &nfsstats.attrcache_misses);

    nvap = &np->n_vattr;
    FSDBG(528, np, nvap->nva_size, np->n_size, 0xcace);
    OSAddAtomic64(1, &nfsstats.attrcache_hits);

    if (nvap->nva_type != VREG) {
        np->n_size = nvap->nva_size;
    } else if (nvap->nva_size != np->n_size) {
        FSDBG(528, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
        if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
            /* if we've modified, stick with larger size */
            nvap->nva_size = np->n_size;
            /*
             * n_size is protected by the data lock, so we need to
             * defer updating it until it's safe.  We save the new size
             * and set a flag and it'll get updated the next time we get/drop
             * the data lock or the next time we do a getattr.
             */
            np->n_newsize = nvap->nva_size;
            SET(np->n_flag, NUPDATESIZE);

    bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
    if (np->n_flag & NCHG) {
        if (np->n_flag & NACC) {
            nvaper->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
            nvaper->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
        if (np->n_flag & NUPD) {
            nvaper->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
            nvaper->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
    if (nvap->nva_acl) {
        if (flags & NGA_ACL) {
            nvaper->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount);
            if (!nvaper->nva_acl) {
            bcopy(nvap->nva_acl, nvaper->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl));
            nvaper->nva_acl = NULL;
/*
 * When creating file system objects:
 * Don't bother setting UID if it's the same as the credential performing the create.
 * Don't bother setting GID if it's the same as the directory or credential.
 */
nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, vfs_context_t ctx)
    if (VATTR_IS_ACTIVE(vap, va_uid)) {
        if (kauth_cred_getuid(vfs_context_ucred(ctx)) == vap->va_uid) {
            VATTR_CLEAR_ACTIVE(vap, va_uid);
            VATTR_CLEAR_ACTIVE(vap, va_uuuid);
    if (VATTR_IS_ACTIVE(vap, va_gid)) {
        if ((vap->va_gid == dnp->n_vattr.nva_gid) ||
            (kauth_cred_getgid(vfs_context_ucred(ctx)) == vap->va_gid)) {
            VATTR_CLEAR_ACTIVE(vap, va_gid);
            VATTR_CLEAR_ACTIVE(vap, va_guuid);
/*
 * Convert a universal address string to a sockaddr structure.
 *
 * Universal addresses can be in the following formats:
 *
 * d = decimal (IPv4)
 * x = hexadecimal (IPv6)
 * p = port (decimal)
 *
 * x:x:x:x:x:x:x:x.p.p
 * x:x:x:x:x:x:d.d.d.d
 * x:x:x:x:x:x:d.d.d.d.p.p
 *
 * IPv6 strings can also have a series of zeroes elided
 * IPv6 strings can also have a %scope suffix at the end (after any port)
 *
 * rules & exceptions:
 * - value before : is hex
 * - value before . is dec
 * - once . hit, all values are dec
 * - hex+port case means value before first dot is actually hex
 * - . is always preceded by digits except if last hex was double-colon
 *
 * scan, converting #s to bytes
 * first time a . is encountered, scan the rest to count them.
 * 2 dots = just port
 * 3 dots = just IPv4 no port
 * 5 dots = IPv4 and port
 */
#define IS_DIGIT(C) \
	(((C) >= '0') && ((C) <= '9'))

#define IS_XDIGIT(C) \
	(IS_DIGIT(C) || \
	(((C) >= 'A') && ((C) <= 'F')) || \
	(((C) >= 'a') && ((C) <= 'f')))
nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr)
    const char *p, *pd;     /* pointers to current character in scan */
    const char *pnum;       /* pointer to current number to decode */
    const char *pscope;     /* pointer to IPv6 scope ID */
    uint8_t a[18];          /* octet array to store address bytes */
    int i;                  /* index of next octet to decode */
    int dci;                /* index of octet to insert double-colon zeroes */
    int dcount, xdcount;    /* count of digits in current number */
    int needmore;           /* set when we know we need more input (e.g. after colon, period) */
    int dots;               /* # of dots */
    int hex;                /* contains hex values */
    unsigned long val;      /* decoded value */
    int s;                  /* index used for sliding array to insert elided zeroes */

    /* AF_LOCAL address are paths that start with '/' or are empty */
    if (*uaddr == '/' || *uaddr == '\0') { /* AF_LOCAL address */
        struct sockaddr_un *sun = (struct sockaddr_un *)addr;
        sun->sun_family = AF_LOCAL;
        sun->sun_len = sizeof(struct sockaddr_un);
        strlcpy(sun->sun_path, uaddr, sizeof(sun->sun_path));

#define DECIMALVALUE 1
	if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \
	if (((TYPE) == DECIMALVALUE) && xdcount) \
	val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 10 : 16); \
	if (((TYPE) == DECIMALVALUE) && (val >= 256)) \
	/* check if there is room left in the array */ \
	if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \
	if ((TYPE) == HEXVALUE) \
		a[i++] = ((val >> 8) & 0xff); \
	a[i++] = (val & 0xff); \
    i = dcount = xdcount = 0;
    if ((*p == ':') && (*++p != ':')) { /* if it starts with colon, gotta be a double */

        if (IS_XDIGIT(*p)) {
            if (!IS_DIGIT(*p)) {
        } else if (*p == '.') {
            /* rest is decimal IPv4 dotted quad and/or port */
                /* this is the first, so count them */
                for (pd = p; *pd; pd++) {
                    } else if (hex && (*pd == '%')) {
                    } else if ((*pd < '0') || (*pd > '9')) {
                if ((dots != 2) && (dots != 3) && (dots != 5)) {
                if (hex && (dots == 2)) { /* hex+port */
                    if (!dcount && needmore) {
                    if (dcount) { /* last hex may be elided zero */
            dcount = xdcount = 0;
        } else if (*p == ':') {
            if (!dcount) { /* missing number, probably double colon */
                if (dci >= 0) { /* can only have one double colon */
            dcount = xdcount = 0;
        } else if (*p == '%') { /* scope ID delimiter */
        } else { /* unexpected character */

    if (needmore && !dcount) {
    if (dcount) { /* decode trailing number */
        GET(dots ? DECIMALVALUE : HEXVALUE);
    if (dci >= 0) { /* got a double-colon at i, need to insert a range of zeroes */
        /* if we got a port, slide to end of array */
        /* otherwise, slide to end of address (non-port) values */
        int end = ((dots == 2) || (dots == 5)) ? sizeof(a) : (sizeof(a) - 2);
        if (i % 2) { /* length of zero range must be multiple of 2 */
        if (i >= end) { /* no room? */
        /* slide (i-dci) numbers up from index dci */
        for (s = 0; s < (i - dci); s++) {
            a[end - 1 - s] = a[i - 1 - s];
        /* zero (end-i) numbers at index dci */
        for (s = 0; s < (end - i); s++) {

    /* copy out resulting socket address */
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
        if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a) - 2)))) {
        if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) {
        bzero(sin6, sizeof(struct sockaddr_in6));
        sin6->sin6_len = sizeof(struct sockaddr_in6);
        sin6->sin6_family = AF_INET6;
        bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr));
        if ((dots == 5) || (dots == 2)) {
            sin6->sin6_port = htons((in_port_t)((a[16] << 8) | a[17]));
            for (p = pscope; IS_DIGIT(*p); p++) {
            if (*p && !IS_DIGIT(*p)) { /* name */
                ifnet_t interface = NULL;
                if (ifnet_find_by_name(pscope, &interface) == 0) {
                    sin6->sin6_scope_id = ifnet_index(interface);
                    ifnet_release(interface);
            } else { /* decimal number */
                sin6->sin6_scope_id = (uint32_t)strtoul(pscope, NULL, 10);
            /* XXX should we also embed scope id for linklocal? */
        struct sockaddr_in *sin = (struct sockaddr_in *)addr;
        if ((dots != 3) && (dots != 5)) {
        if ((dots == 3) && (i != 4)) {
        if ((dots == 5) && (i != 6)) {
        bzero(sin, sizeof(struct sockaddr_in));
        sin->sin_len = sizeof(struct sockaddr_in);
        sin->sin_family = AF_INET;
        bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr));
        sin->sin_port = htons((in_port_t)((a[4] << 8) | a[5]));
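#if 0
/*
 * Usage sketch (editorial, not part of the original source; assumes the
 * function's nonzero-on-success return convention).  "192.0.2.1.8.1" is the
 * universal-address form of IPv4 192.0.2.1 with port (8 << 8) | 1 == 2049.
 */
struct sockaddr_storage ss;
if (nfs_uaddr2sockaddr("192.0.2.1.8.1", (struct sockaddr *)&ss)) {
    /* ss now holds an AF_INET sockaddr_in for 192.0.2.1, port 2049 */
}
#endif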
/* NFS Client debugging support */
uint32_t nfs_debug_ctl;

#include <libkern/libkern.h>

nfs_printf(unsigned int facility, unsigned int level, const char *fmt, ...)
    if (NFS_IS_DBG(facility, level)) {

#define DISPLAYLEN 16

    return ch >= 0x20 && ch <= 0x7e;
hexdump(void *data, size_t len)
    unsigned char *d = data;
    char *p, disbuf[3 * DISPLAYLEN + 1];

    for (i = 0; i < len; i += DISPLAYLEN) {
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
            snprintf(p, 4, "%2.2x ", d[i + j]);
        for (; j < DISPLAYLEN; j++, p += 3) {
            snprintf(p, 4, "   ");
        printf("%s ", disbuf);
        for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p++) {
            snprintf(p, 2, "%c", isprint(d[i + j]) ? d[i + j] : '.');
        printf("%s\n", disbuf);
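/*
 * Example output (editorial note): each line printed above shows up to
 * DISPLAYLEN (16) bytes as hex pairs, space-padded to a fixed width, followed
 * by their printable-ASCII rendering with '.' for non-printable bytes; e.g. a
 * three-byte buffer { 0x48, 0x69, 0x21 } renders as "48 69 21" plus padding,
 * then "Hi!".
 */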
nfs_dump_mbuf(const char *func, int lineno, const char *msg, mbuf_t mb)
    printf("%s:%d %s\n", func, lineno, msg);
    for (m = mb; m; m = mbuf_next(m)) {
        hexdump(mbuf_data(m), mbuf_len(m));
/* Is a mount gone away? */
nfs_mount_gone(struct nfsmount *nmp)
    return !nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD));
/*
 * Return some of the more significant mount options
 * as a string, e.g. "'ro,hard,intr,tcp,vers=3,sec=krb5,deadtimeout=0'
 */
nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen)
    c = snprintf(buf, buflen, "%s,%s,%s,%s,vers=%d,sec=%s,%sdeadtimeout=%d",
        (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw",
        NMFLAG(nmp, SOFT) ? "soft" : "hard",
        NMFLAG(nmp, INTR) ? "intr" : "nointr",
        nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp",
        nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" :
        nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" :
        nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" :
        nmp->nm_auth == RPCAUTH_SYS ? "sys" : "none",
        nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," :
        nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," :
        nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "",
        nmp->nm_deadtimeout);

    return c > buflen ? ENOMEM : 0;

#endif /* CONFIG_NFS_CLIENT */
/*
 * Schedule a callout thread to run an NFS timer function
 * interval milliseconds in the future.
 */
nfs_interval_timer_start(thread_call_t call, time_t interval)
    clock_interval_to_deadline((int)interval, 1000 * 1000, &deadline);
    thread_call_enter_delayed(call, deadline);

#if CONFIG_NFS_SERVER
int nfsrv_cmp_secflavs(struct nfs_sec *, struct nfs_sec *);
int nfsrv_hang_addrlist(struct nfs_export *, struct user_nfs_export_args *);
int nfsrv_free_netopt(struct radix_node *, void *);
int nfsrv_free_addrlist(struct nfs_export *, struct user_nfs_export_args *);
struct nfs_export_options *nfsrv_export_lookup(struct nfs_export *, mbuf_t);
struct nfs_export *nfsrv_fhtoexport(struct nfs_filehandle *);
struct nfs_user_stat_node *nfsrv_get_user_stat_node(struct nfs_active_user_list *, struct sockaddr *, uid_t);
void nfsrv_init_user_list(struct nfs_active_user_list *);
void nfsrv_free_user_list(struct nfs_active_user_list *);
/*
 * add NFSv3 WCC data to an mbuf chain
 */
nfsm_chain_add_wcc_data_f(
    struct nfsrv_descript *nd,
    struct nfsm_chain *nmc,
    struct vnode_attr *prevap,
    struct vnode_attr *postvap)
        nfsm_chain_add_32(error, nmc, FALSE);
        nfsm_chain_add_32(error, nmc, TRUE);
        nfsm_chain_add_64(error, nmc, prevap->va_data_size);
        nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_modify_time);
        nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_change_time);
    nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap);
/*
 * Extract a lookup path from the given mbufs and store it in
 * a newly allocated buffer saved in the given nameidata structure.
 */
nfsm_chain_get_path_namei(
    struct nfsm_chain *nmc,
    struct nameidata *nip)
    struct componentname *cnp = &nip->ni_cnd;

    if (len > (MAXPATHLEN - 1)) {
        return ENAMETOOLONG;

    /*
     * Get a buffer for the name to be translated, and copy the
     * name into the buffer.
     */
    cnp->cn_pnbuf = zalloc(ZV_NAMEI);
    cnp->cn_pnlen = MAXPATHLEN;
    cnp->cn_flags |= HASBUF;

    /* Copy the name from the mbuf list to the string */
    nfsm_chain_get_opaque(error, nmc, len, cp);

    cnp->cn_pnbuf[len] = '\0';

    /* sanity check the string */
    if ((strlen(cp) != len) || strchr(cp, '/')) {
        if (cnp->cn_pnbuf) {
            NFS_ZFREE(ZV_NAMEI, cnp->cn_pnbuf);
        cnp->cn_flags &= ~HASBUF;
    nip->ni_pathlen = len;
/*
 * Set up nameidata for a lookup() call and do it.
 */
    struct nfsrv_descript *nd,
    struct nameidata *nip,
    struct nfs_filehandle *nfhp,
    struct nfs_export **nxp,
    struct nfs_export_options **nxop)
    struct componentname *cnp = &nip->ni_cnd;

    /*
     * Extract and set starting directory.
     */
    error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop);
    error = nfsrv_credcheck(nd, ctx, *nxp, *nxop);
    if (error || (vnode_vtype(dp) != VDIR)) {

    nip->ni_cnd.cn_context = ctx;

    if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) {
        cnp->cn_flags |= RDONLY;

    cnp->cn_flags |= NOCROSSMOUNT;
    cnp->cn_nameptr = cnp->cn_pnbuf;
    nip->ni_usedvp = nip->ni_startdir = dp;
    nip->ni_rootdir = rootvnode;

    /*
     * And call lookup() to do the real work
     */
    cnflags = nip->ni_cnd.cn_flags; /* store in case we have to restore */
    while ((error = lookup(nip)) == ERECYCLE) {
        nip->ni_cnd.cn_flags = cnflags;
        cnp->cn_nameptr = cnp->cn_pnbuf;
        nip->ni_usedvp = nip->ni_dvp = nip->ni_startdir = dp;

    /* Check for encountering a symbolic link */
    if (cnp->cn_flags & ISSYMLINK) {
        if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) {
            vnode_put(nip->ni_dvp);
            vnode_put(nip->ni_vp);

        tmppn = cnp->cn_pnbuf;
        cnp->cn_pnbuf = NULL;
        cnp->cn_flags &= ~HASBUF;
        NFS_ZFREE(ZV_NAMEI, tmppn);
/*
 * A fiddled version of m_adj() that ensures null fill to a 4-byte
 * boundary and only trims off the back end
 */
void
nfsm_adj(mbuf_t mp, int len, int nul)
{
	mbuf_t m, mnext;
	caddr_t cp;
	int i, count, mlen;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		mlen = (int)mbuf_len(m);
		count += mlen;
		mnext = mbuf_next(m);
		if (mnext == NULL) {
			break;
		}
		m = mnext;
	}
	if (mlen > len) {
		mlen -= len;
		mbuf_setlen(m, mlen);
		if (nul > 0) {
			cp = (caddr_t)mbuf_data(m) + mlen - nul;
			for (i = 0; i < nul; i++) {
				*cp++ = '\0';
			}
		}
		return;
	}
	count -= len;
	if (count < 0) {
		count = 0;
	}

	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = mbuf_next(m)) {
		mlen = (int)mbuf_len(m);
		if (mlen >= count) {
			mbuf_setlen(m, count);
			if (nul > 0) {
				cp = (caddr_t)mbuf_data(m) + count - nul;
				for (i = 0; i < nul; i++) {
					*cp++ = '\0';
				}
			}
			break;
		}
		count -= mlen;
	}
	for (m = mbuf_next(m); m; m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}
}
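/*
 * Illustrative sketch (not from the original source): the tail-trimming
 * logic above relies on walking the chain with mbuf_next()/mbuf_len().
 * A minimal helper doing just that walk might look like the following;
 * "example_mbuf_chain_len" is a hypothetical name.
 */
#if 0   /* example only */
static size_t
example_mbuf_chain_len(mbuf_t mp)
{
	size_t total = 0;
	mbuf_t m;

	for (m = mp; m; m = mbuf_next(m)) {
		total += mbuf_len(m);
	}
	return total;
}
#endif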
/*
 * Trim the header out of the mbuf list and trim off any trailing
 * junk so that the mbuf list has only the write data.
 */
int
nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
{
	int cnt = 0, dlen, adjust;
	caddr_t data;
	mbuf_t m;

	if (mlen) {
		*mlen = 0;
	}

	/* trim header */
	for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}

	/* trim current mbuf */
	data = mbuf_data(m);
	dlen = (int)mbuf_len(m);
	adjust = (int)(nmc->nmc_ptr - data);
	dlen -= adjust;
	if ((dlen > 0) && (adjust > 0)) {
		if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) {
			return EIO;
		}
	} else {
		mbuf_setlen(m, dlen);
	}

	/* skip next len bytes */
	for (; m && (cnt < len); m = mbuf_next(m)) {
		dlen = (int)mbuf_len(m);
		cnt += dlen;
		if (cnt > len) {
			/* truncate to end of data */
			mbuf_setlen(m, dlen - (cnt - len));
			if (m == nmc->nmc_mcur) {
				nmc->nmc_left -= (cnt - len);
			}
		}
	}

	/* trim any trailing data */
	if (m == nmc->nmc_mcur) {
		nmc->nmc_left = 0;
	}
	for (; m; m = mbuf_next(m)) {
		mbuf_setlen(m, 0);
	}

	if (mlen) {
		*mlen = cnt;
	}
	return 0;
}
int
nfsm_chain_add_fattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
{
	int error = 0;

	// XXX Should we assert here that all fields are supported?

	nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
	} else {
		nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
	}
	nfsm_chain_add_32(error, nmc, vap->va_nlink);
	nfsm_chain_add_32(error, nmc, vap->va_uid);
	nfsm_chain_add_32(error, nmc, vap->va_gid);
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_64(error, nmc, vap->va_data_size);
		nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
		nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
		nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
		nfsm_chain_add_64(error, nmc, vap->va_fsid);
		nfsm_chain_add_64(error, nmc, vap->va_fileid);
	} else {
		nfsm_chain_add_32(error, nmc, vap->va_data_size);
		nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
		if (vap->va_type == VFIFO) {
			nfsm_chain_add_32(error, nmc, 0xffffffff);
		} else {
			nfsm_chain_add_32(error, nmc, vap->va_rdev);
		}
		nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
		nfsm_chain_add_32(error, nmc, vap->va_fsid);
		nfsm_chain_add_32(error, nmc, vap->va_fileid);
	}
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);

	return error;
}
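/*
 * For reference (derived from the code above, not part of the original
 * source): the v2 and v3 fattr encodings differ mainly in field width --
 * NFSv3 uses 64-bit size/used/fsid/fileid and splits rdev into major/minor
 * words, while NFSv2 packs size, blocksize, rdev, blocks, fsid and fileid
 * into 32-bit words (with 0xffffffff as the rdev value for FIFOs).  Both
 * versions end with atime/mtime/ctime.
 */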
int
nfsm_chain_get_sattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
{
	int error = 0;
	uint32_t val = 0;
	uint64_t val64 = 0;
	struct timespec now;

	if (nd->nd_vers == NFS_VER2) {
		/*
		 * There is/was a bug in the Sun client that puts 0xffff in the mode
		 * field of sattr when it should put in 0xffffffff. The u_short
		 * doesn't sign extend. So check the low order 2 bytes for 0xffff.
		 */
		nfsm_chain_get_32(error, nmc, val);
		if ((val & 0xffff) != 0xffff) {
			VATTR_SET(vap, va_mode, val & 07777);
			/* save the "type" bits for NFSv2 create */
			VATTR_SET(vap, va_type, IFTOVT(val));
			VATTR_CLEAR_ACTIVE(vap, va_type);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1) {
			VATTR_SET(vap, va_uid, val);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1) {
			VATTR_SET(vap, va_gid, val);
		}
		/* save the "size" bits for NFSv2 create (even if they appear unset) */
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_data_size, val);
		if (val == (uint32_t)-1) {
			VATTR_CLEAR_ACTIVE(vap, va_data_size);
		}
		nfsm_chain_get_time(error, nmc, NFS_VER2,
		    vap->va_access_time.tv_sec,
		    vap->va_access_time.tv_nsec);
		if (vap->va_access_time.tv_sec != -1) {
			VATTR_SET_ACTIVE(vap, va_access_time);
		}
		nfsm_chain_get_time(error, nmc, NFS_VER2,
		    vap->va_modify_time.tv_sec,
		    vap->va_modify_time.tv_nsec);
		if (vap->va_modify_time.tv_sec != -1) {
			VATTR_SET_ACTIVE(vap, va_modify_time);
		}
	} else {
		/* NFSv3 sattr3: each field is preceded by a "set it" discriminant */
		nfsm_chain_get_32(error, nmc, val);
		if (val) {
			nfsm_chain_get_32(error, nmc, val);
			VATTR_SET(vap, va_mode, val & 07777);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val) {
			nfsm_chain_get_32(error, nmc, val);
			VATTR_SET(vap, va_uid, val);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val) {
			nfsm_chain_get_32(error, nmc, val);
			VATTR_SET(vap, va_gid, val);
		}
		nfsm_chain_get_32(error, nmc, val);
		if (val) {
			nfsm_chain_get_64(error, nmc, val64);
			VATTR_SET(vap, va_data_size, val64);
		}
		nanotime(&now);
		nfsm_chain_get_32(error, nmc, val);
		switch (val) {
		case NFS_TIME_SET_TO_CLIENT:
			nfsm_chain_get_time(error, nmc, nd->nd_vers,
			    vap->va_access_time.tv_sec,
			    vap->va_access_time.tv_nsec);
			VATTR_SET_ACTIVE(vap, va_access_time);
			vap->va_vaflags &= ~VA_UTIMES_NULL;
			break;
		case NFS_TIME_SET_TO_SERVER:
			VATTR_SET(vap, va_access_time, now);
			vap->va_vaflags |= VA_UTIMES_NULL;
			break;
		}
		nfsm_chain_get_32(error, nmc, val);
		switch (val) {
		case NFS_TIME_SET_TO_CLIENT:
			nfsm_chain_get_time(error, nmc, nd->nd_vers,
			    vap->va_modify_time.tv_sec,
			    vap->va_modify_time.tv_nsec);
			VATTR_SET_ACTIVE(vap, va_modify_time);
			vap->va_vaflags &= ~VA_UTIMES_NULL;
			break;
		case NFS_TIME_SET_TO_SERVER:
			VATTR_SET(vap, va_modify_time, now);
			if (!VATTR_IS_ACTIVE(vap, va_access_time)) {
				vap->va_vaflags |= VA_UTIMES_NULL;
			}
			break;
		}
	}

	return error;
}
/*
 * Compare two security flavor structs
 */
int
nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
{
	int i;

	if (sf1->count != sf2->count) {
		return 1;
	}
	for (i = 0; i < sf1->count; i++) {
		if (sf1->flavors[i] != sf2->flavors[i]) {
			return 1;
		}
	}
	return 0;
}
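/*
 * Illustrative sketch (not from the original source): nfsrv_cmp_secflavs()
 * returns 0 when the two flavor lists match and non-zero otherwise, so
 * callers treat it like a comparison function.  The values below are
 * hypothetical; only the "count"/"flavors" fields used above are assumed,
 * and RPCAUTH_SYS is assumed to be an available flavor constant.
 */
#if 0   /* example only */
static void
example_secflav_cmp(void)
{
	struct nfs_sec a = { .count = 1, .flavors = { RPCAUTH_SYS } };
	struct nfs_sec b = { .count = 1, .flavors = { RPCAUTH_SYS } };

	if (!nfsrv_cmp_secflavs(&a, &b)) {
		/* identical flavor lists */
	}
}
#endif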
/*
 * Build hash lists of net addresses and hang them off the NFS export.
 * Called by nfsrv_export() to set up the lists of export addresses.
 */
int
nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
	struct nfs_export_net_args nxna;
	struct nfs_netopt *no, *rn_no;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask;
	struct domain *dom;
	kauth_cred_t cred;
	user_addr_t uaddr;
	uint32_t net;
	size_t i;
	int error;

	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));
		if (error) {
			return error;
		}

		if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) ||
		    nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) ||
		    nxna.nxna_addr.ss_family > AF_MAX ||
		    nxna.nxna_mask.ss_family > AF_MAX) {
			return EINVAL;
		}

		cred = NULL;
		if (nxna.nxna_flags & (NX_MAPROOT | NX_MAPALL)) {
			struct posix_cred temp_pcred;
			bzero(&temp_pcred, sizeof(temp_pcred));
			temp_pcred.cr_uid = nxna.nxna_cred.cr_uid;
			temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
			for (i = 0; i < (size_t)nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) {
				temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
			}
			cred = posix_cred_create(&temp_pcred);
			if (!IS_VALID_CRED(cred)) {
				return ENOMEM;
			}
		}

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				if (IS_VALID_CRED(cred)) {
					kauth_cred_unref(&cred);
				}
				continue;
			}
			nx->nx_flags |= NX_DEFAULTEXPORT;
			nx->nx_defopt.nxo_flags = nxna.nxna_flags;
			nx->nx_defopt.nxo_cred = cred;
			bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));
			continue;
		}

		i = sizeof(struct nfs_netopt);
		i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
		MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
		if (!no) {
			if (IS_VALID_CRED(cred)) {
				kauth_cred_unref(&cred);
			}
			return ENOMEM;
		}
		bzero(no, sizeof(struct nfs_netopt));
		no->no_opt.nxo_flags = nxna.nxna_flags;
		no->no_opt.nxo_cred = cred;
		bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));

		saddr = (struct sockaddr *)(no + 1);
		smask = NULL;
		bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
		if (nxna.nxna_mask.ss_len) {
			smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
			bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);
		}

		sa_family_t family = saddr->sa_family;
		if ((rnh = nx->nx_rtable[family]) == 0) {
			/*
			 * Seems silly to initialize every AF when most are not
			 * used, do so on demand here
			 */
			TAILQ_FOREACH(dom, &domains, dom_entry) {
				if (dom->dom_family == family && dom->dom_rtattach) {
					dom->dom_rtattach((void **)&nx->nx_rtable[family],
					    dom->dom_rtoffset);
					break;
				}
			}
			if ((rnh = nx->nx_rtable[family]) == 0) {
				if (IS_VALID_CRED(cred)) {
					kauth_cred_unref(&cred);
				}
				_FREE(no, M_NETADDR);
				return ENOBUFS;
			}
		}

		rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
		if (rn == 0) {
			/*
			 * One of the reasons that rnh_addaddr may fail is that
			 * the entry already exists. To check for this case, we
			 * look up the entry to see if it is there. If so, we
			 * do not need to make a new entry but do continue.
			 *
			 * XXX should this be rnh_lookup() instead?
			 */
			int matched = 0;
			rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
			rn_no = (struct nfs_netopt *)rn;
			if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
			    (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
			    (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
				kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
				if (cred == cred2) {
					/* creds are same (or both NULL) */
					matched = 1;
				} else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) {
					/*
					 * Now compare the effective and
					 * supplementary groups...
					 *
					 * Note: This comparison, as written,
					 * does not correctly indicate that
					 * the groups are equivalent, since
					 * other than the first supplementary
					 * group, which is also the effective
					 * group, order on the remaining groups
					 * doesn't matter, and this is an
					 * ordered compare.
					 */
					gid_t groups[NGROUPS];
					gid_t groups2[NGROUPS];
					size_t groupcount = NGROUPS;
					size_t group2count = NGROUPS;

					if (!kauth_cred_getgroups(cred, groups, &groupcount) &&
					    !kauth_cred_getgroups(cred2, groups2, &group2count) &&
					    groupcount == group2count) {
						for (i = 0; i < group2count; i++) {
							if (groups[i] != groups2[i]) {
								break;
							}
						}
						if (i >= group2count || i >= NGROUPS) {
							matched = 1;
						}
					}
				}
			}
			if (IS_VALID_CRED(cred)) {
				kauth_cred_unref(&cred);
			}
			_FREE(no, M_NETADDR);
			if (!matched) {
				return EPERM;
			}
		}
	}

	return 0;
}
/*
 * In order to properly track an export's netopt count, we need to pass
 * an additional argument to nfsrv_free_netopt() so that it can decrement
 * the export's netopt count.
 */
struct nfsrv_free_netopt_arg {
	uint32_t *cnt;
	struct radix_node_head *rnh;
};

int
nfsrv_free_netopt(struct radix_node *rn, void *w)
{
	struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
	struct radix_node_head *rnh = fna->rnh;
	uint32_t *cnt = fna->cnt;
	struct nfs_netopt *nno = (struct nfs_netopt *)rn;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
		kauth_cred_unref(&nno->no_opt.nxo_cred);
	}
	_FREE((caddr_t)rn, M_NETADDR);
	*cnt -= 1;

	return 0;
}
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
int
nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
{
	struct nfs_export_net_args nxna;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct nfsrv_free_netopt_arg fna;
	struct nfs_netopt *nno;
	user_addr_t uaddr;
	uint32_t net;
	uint32_t i;
	int error;

	if (!unxa || !unxa->nxa_netcount) {
		/* delete everything */
		for (i = 0; i <= AF_MAX; i++) {
			if ((rnh = nx->nx_rtable[i])) {
				fna.rnh = rnh;
				fna.cnt = &nx->nx_expcnt;
				(*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
				_FREE((caddr_t)rnh, M_RTABLE);
				nx->nx_rtable[i] = 0;
			}
		}
		return 0;
	}

	/* delete only the exports specified */
	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));
		if (error) {
			return error;
		}

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
			}
			continue;
		}

		if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
			/* AF not initialized? */
			if (!(unxa->nxa_flags & NXA_ADD)) {
				printf("nfsrv_free_addrlist: address not found (0)\n");
			}
			continue;
		}

		rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
		    nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
		if (!rn || (rn->rn_flags & RNF_ROOT)) {
			if (!(unxa->nxa_flags & NXA_ADD)) {
				printf("nfsrv_free_addrlist: address not found (1)\n");
			}
			continue;
		}

		(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
		nno = (struct nfs_netopt *)rn;
		if (IS_VALID_CRED(nno->no_opt.nxo_cred)) {
			kauth_cred_unref(&nno->no_opt.nxo_cred);
		}
		_FREE((caddr_t)rn, M_NETADDR);
		nx->nx_expcnt--;

		if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
			/* no more entries in rnh, so free it up */
			_FREE((caddr_t)rnh, M_RTABLE);
			nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
		}
	}

	return 0;
}
void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX

int
nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx)
{
	int error = 0;
	int expisroot;
	size_t pathlen = 0;
	struct nfs_exportfs *nxfs, *nxfs2, *nxfs3;
	struct nfs_export *nx, *nx2, *nx3;
	struct nfs_filehandle nfh;
	struct nameidata mnd, xnd;
	vnode_t mvp = NULL, xvp = NULL;
	mount_t mp = NULL;
	char path[MAXPATHLEN];
	char fl_pathbuff[MAXPATHLEN];
	int fl_pathbuff_len = MAXPATHLEN;

	if (unxa->nxa_flags == NXA_CHECK) {
		/* just check if the path is an NFS-exportable file system */
		error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
		if (error) {
			return error;
		}
		NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
		error = namei(&mnd);
		if (error) {
			return error;
		}
		mvp = mnd.ni_vp;
		mp = vnode_mount(mvp);
		/* make sure it's the root of a file system */
		if (!vnode_isvroot(mvp)) {
			error = EINVAL;
		}
		/* make sure the file system is NFS-exportable */
		if (!error) {
			nfh.nfh_len = NFSV3_MAX_FID_SIZE;
			error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
		}
		if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
			error = EIO;
		}
		if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
			error = EISDIR;
		}
		vnode_put(mvp);
		nameidone(&mnd);
		return error;
	}

	/* all other operations: must be super user */
	if ((error = vfs_context_suser(ctx))) {
		return error;
	}

	if (unxa->nxa_flags & NXA_DELETE_ALL) {
		/* delete all exports on all file systems */
		lck_rw_lock_exclusive(&nfsrv_export_rwlock);
		while ((nxfs = LIST_FIRST(&nfsrv_exports))) {
			mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
			if (mp) {
				vfs_clearflags(mp, MNT_EXPORTED);
			}
			/* delete all exports on this file system */
			while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
				LIST_REMOVE(nx, nx_next);
				LIST_REMOVE(nx, nx_hash);
				/* delete all netopts for this export */
				nfsrv_free_addrlist(nx, NULL);
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
				/* free active user list for this export */
				nfsrv_free_user_list(&nx->nx_user_list);
				FREE(nx->nx_path, M_TEMP);
				FREE(nx, M_TEMP);
			}
			LIST_REMOVE(nxfs, nxfs_next);
			FREE(nxfs->nxfs_path, M_TEMP);
			FREE(nxfs, M_TEMP);
		}
		if (nfsrv_export_hashtbl) {
			/* all exports deleted, clean up export hash table */
			FREE(nfsrv_export_hashtbl, M_TEMP);
			nfsrv_export_hashtbl = NULL;
		}
		lck_rw_done(&nfsrv_export_rwlock);
		return 0;
	}

	error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen);
	if (error) {
		return error;
	}

	lck_rw_lock_exclusive(&nfsrv_export_rwlock);

	/* init export hash table if not already */
	if (!nfsrv_export_hashtbl) {
		if (nfsrv_export_hash_size <= 0) {
			nfsrv_export_hash_size = NFSRVEXPHASHSZ;
		}
		nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash);
	}

	// first check if we've already got an exportfs with the given ID
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		if (nxfs->nxfs_id == unxa->nxa_fsid) {
			break;
		}
	}
	if (nxfs) {
		/* verify exported FS path matches given path */
		if (strncmp(path, nxfs->nxfs_path, MAXPATHLEN)) {
			error = EEXIST;
			goto unlock_out;
		}
		if ((unxa->nxa_flags & (NXA_ADD | NXA_OFFLINE)) == NXA_ADD) {
			/* find exported FS root vnode */
			NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
			    UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx);
			error = namei(&mnd);
			if (error) {
				goto unlock_out;
			}
			mvp = mnd.ni_vp;
			/* make sure it's (still) the root of a file system */
			if (!vnode_isvroot(mvp)) {
				error = EINVAL;
				goto out;
			}
			/* if adding, verify that the mount is still what we expect */
			mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path);
			if (!mp) {
				/* check for firmlink-free path */
				if (vn_getpath_no_firmlink(mvp, fl_pathbuff, &fl_pathbuff_len) == 0 &&
				    fl_pathbuff_len > 0 &&
				    !strncmp(nxfs->nxfs_path, fl_pathbuff, MAXPATHLEN)) {
					mp = vfs_getvfs_by_mntonname(vnode_mount(mvp)->mnt_vfsstat.f_mntonname);
				}
			}
			/* sanity check: this should be same mount */
			if (mp != vnode_mount(mvp)) {
				error = EINVAL;
				goto out;
			}
		}
	} else {
		/* no current exported file system with that ID */
		if (!(unxa->nxa_flags & NXA_ADD)) {
			error = ENOENT;
			goto unlock_out;
		}

		/* find exported FS root vnode */
		NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
		    UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
		error = namei(&mnd);
		if (error) {
			if (!(unxa->nxa_flags & NXA_OFFLINE)) {
				goto unlock_out;
			}
			error = 0;
		} else {
			mvp = mnd.ni_vp;
			/* make sure it's the root of a file system */
			if (!vnode_isvroot(mvp)) {
				/* bail if not marked offline */
				if (!(unxa->nxa_flags & NXA_OFFLINE)) {
					error = EINVAL;
					goto out;
				}
			} else {
				mp = vnode_mount(mvp);

				/* make sure the file system is NFS-exportable */
				nfh.nfh_len = NFSV3_MAX_FID_SIZE;
				error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL);
				if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
					error = EIO;
				}
				if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) {
					error = EISDIR;
				}
				if (error) {
					goto out;
				}
			}
		}

		/* add an exportfs for it */
		MALLOC(nxfs, struct nfs_exportfs *, sizeof(struct nfs_exportfs), M_TEMP, M_WAITOK);
		if (!nxfs) {
			error = ENOMEM;
			goto out;
		}
		bzero(nxfs, sizeof(struct nfs_exportfs));
		nxfs->nxfs_id = unxa->nxa_fsid;
		MALLOC(nxfs->nxfs_path, char*, pathlen, M_TEMP, M_WAITOK);
		if (!nxfs->nxfs_path) {
			FREE(nxfs, M_TEMP);
			error = ENOMEM;
			goto out;
		}
		bcopy(path, nxfs->nxfs_path, pathlen);
		/* insert into list in reverse-sorted order */
		nxfs3 = NULL;
		LIST_FOREACH(nxfs2, &nfsrv_exports, nxfs_next) {
			if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) {
				break;
			}
			nxfs3 = nxfs2;
		}
		if (nxfs2) {
			LIST_INSERT_BEFORE(nxfs2, nxfs, nxfs_next);
		} else if (nxfs3) {
			LIST_INSERT_AFTER(nxfs3, nxfs, nxfs_next);
		} else {
			LIST_INSERT_HEAD(&nfsrv_exports, nxfs, nxfs_next);
		}

		/* make sure any quotas are enabled before we export the file system */
		if (mp) {
			enablequotas(mp, ctx);
		}
	}

	if (unxa->nxa_exppath) {
		error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, &pathlen);
		if (error) {
			goto out;
		}
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			if (nx->nx_id == unxa->nxa_expid) {
				break;
			}
		}
		if (nx) {
			/* verify exported FS path matches given path */
			if (strncmp(path, nx->nx_path, MAXPATHLEN)) {
				error = EEXIST;
				goto out;
			}
		} else {
			/* no current export with that ID */
			if (!(unxa->nxa_flags & NXA_ADD)) {
				error = ENOENT;
				goto out;
			}
			/* add an export for it */
			MALLOC(nx, struct nfs_export *, sizeof(struct nfs_export), M_TEMP, M_WAITOK);
			if (!nx) {
				error = ENOMEM;
				goto out1;
			}
			bzero(nx, sizeof(struct nfs_export));
			nx->nx_id = unxa->nxa_expid;
			nx->nx_fs = nxfs;
			microtime(&nx->nx_exptime);
			MALLOC(nx->nx_path, char*, pathlen, M_TEMP, M_WAITOK);
			if (!nx->nx_path) {
				FREE(nx, M_TEMP);
				nx = NULL;
				error = ENOMEM;
				goto out1;
			}
			bcopy(path, nx->nx_path, pathlen);
			/* initialize the active user list */
			nfsrv_init_user_list(&nx->nx_user_list);
			/* insert into list in reverse-sorted order */
			nx3 = NULL;
			LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
				if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) {
					break;
				}
				nx3 = nx2;
			}
			if (nx2) {
				LIST_INSERT_BEFORE(nx2, nx, nx_next);
			} else if (nx3) {
				LIST_INSERT_AFTER(nx3, nx, nx_next);
			} else {
				LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next);
			}
			/* insert into hash */
			LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash);

			/*
			 * We don't allow/support nested exports.  Check if the new entry
			 * nests with the entries before and after or if there's an
			 * entry for the file system root and subdirs.
			 */
			error = 0;
			if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) &&
			    (nx3->nx_path[pathlen - 1] == '/')) ||
			    (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) &&
			    (nx->nx_path[strlen(nx2->nx_path)] == '/'))) {
				error = EINVAL;
			}
			if (!error) {
				/* check export conflict with fs root export and vice versa */
				expisroot = !nx->nx_path[0] ||
				    ((nx->nx_path[0] == '.') && !nx->nx_path[1]);
				LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
					if (expisroot) {
						if (nx2 != nx) {
							break;
						}
					} else if (!nx2->nx_path[0]) {
						break;
					} else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) {
						break;
					}
				}
				if (nx2) {
					error = EINVAL;
				}
			}
			if (error) {
				/*
				 * Don't actually return an error because mountd is
				 * probably about to delete the conflicting export.
				 * This can happen when a new export momentarily conflicts
				 * with an old export while the transition is being made.
				 * Theoretically, mountd could be written to avoid this
				 * transient situation - but it would greatly increase the
				 * complexity of mountd for very little overall benefit.
				 */
				printf("nfsrv_export: warning: nested exports: %s/%s\n",
				    nxfs->nxfs_path, nx->nx_path);
				error = 0;
			}
			nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
		}
		/* make sure file handle is set up */
		if ((nx->nx_fh.nfh_xh.nxh_version != htonl(NFS_FH_VERSION)) ||
		    (nx->nx_fh.nfh_xh.nxh_flags & NXHF_INVALIDFH)) {
			/* try to set up export root file handle */
			nx->nx_fh.nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
			nx->nx_fh.nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
			nx->nx_fh.nfh_xh.nxh_expid = htonl(nx->nx_id);
			nx->nx_fh.nfh_xh.nxh_flags = 0;
			nx->nx_fh.nfh_xh.nxh_reserved = 0;
			nx->nx_fh.nfh_fhp = (u_char *)&nx->nx_fh.nfh_xh;
			bzero(&nx->nx_fh.nfh_fid[0], NFSV2_MAX_FID_SIZE);
			if (mvp) {
				/* find export root vnode */
				if (!nx->nx_path[0] || ((nx->nx_path[0] == '.') && !nx->nx_path[1])) {
					/* exporting file system's root directory */
					xvp = mvp;
					vnode_get(xvp);
				} else {
					xnd.ni_cnd.cn_nameiop = LOOKUP;
					xnd.ni_op = OP_LOOKUP;
					xnd.ni_cnd.cn_flags = LOCKLEAF;
					xnd.ni_pathlen = (uint32_t)pathlen - 1; // pathlen max value is equal to MAXPATHLEN
					xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path;
					xnd.ni_startdir = mvp;
					xnd.ni_usedvp = mvp;
					xnd.ni_rootdir = rootvnode;
					xnd.ni_cnd.cn_context = ctx;
					while ((error = lookup(&xnd)) == ERECYCLE) {
						xnd.ni_cnd.cn_flags = LOCKLEAF;
						xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf;
						xnd.ni_usedvp = xnd.ni_dvp = xnd.ni_startdir = mvp;
					}
					if (error) {
						goto out1;
					}
					xvp = xnd.ni_vp;
				}

				if (vnode_vtype(xvp) != VDIR) {
					error = EINVAL;
					vnode_put(xvp);
					goto out1;
				}

				/* grab file handle */
				nx->nx_fh.nfh_len = NFSV3_MAX_FID_SIZE;
				error = VFS_VPTOFH(xvp, (int*)&nx->nx_fh.nfh_len, &nx->nx_fh.nfh_fid[0], NULL);
				if (!error && (nx->nx_fh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
					error = EIO;
				} else {
					nx->nx_fh.nfh_xh.nxh_fidlen = nx->nx_fh.nfh_len;
					nx->nx_fh.nfh_len += sizeof(nx->nx_fh.nfh_xh);
				}

				vnode_put(xvp);
				if (error) {
					goto out1;
				}
			} else {
				nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
				nx->nx_fh.nfh_xh.nxh_fidlen = 0;
				nx->nx_fh.nfh_len = sizeof(nx->nx_fh.nfh_xh);
			}
		}
	} else {
		nx = NULL;
	}

	/* perform the export changes */
	if (unxa->nxa_flags & NXA_DELETE) {
		if (!nx) {
			/* delete all exports on this file system */
			while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
				LIST_REMOVE(nx, nx_next);
				LIST_REMOVE(nx, nx_hash);
				/* delete all netopts for this export */
				nfsrv_free_addrlist(nx, NULL);
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);
				}
				/* delete active user list for this export */
				nfsrv_free_user_list(&nx->nx_user_list);
				FREE(nx->nx_path, M_TEMP);
				FREE(nx, M_TEMP);
			}
			goto out1;
		} else if (!unxa->nxa_netcount) {
			/* delete all netopts for this export */
			nfsrv_free_addrlist(nx, NULL);
			nx->nx_flags &= ~NX_DEFAULTEXPORT;
			if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
				kauth_cred_unref(&nx->nx_defopt.nxo_cred);
			}
		} else {
			/* delete only the netopts for the given addresses */
			error = nfsrv_free_addrlist(nx, unxa);
			if (error) {
				goto out1;
			}
		}
	}
	if (unxa->nxa_flags & NXA_ADD) {
		/*
		 * If going offline set the export time so that when
		 * coming back on line we will present a new write verifier
		 */
		if (unxa->nxa_flags & NXA_OFFLINE) {
			microtime(&nx->nx_exptime);
		}
		error = nfsrv_hang_addrlist(nx, unxa);
		if (!error && mp) {
			vfs_setflags(mp, MNT_EXPORTED);
		}
	}

out1:
	if (nx && !nx->nx_expcnt) {
		/* export has no export options */
		LIST_REMOVE(nx, nx_next);
		LIST_REMOVE(nx, nx_hash);
		/* delete active user list for this export */
		nfsrv_free_user_list(&nx->nx_user_list);
		FREE(nx->nx_path, M_TEMP);
		FREE(nx, M_TEMP);
	}
	if (LIST_EMPTY(&nxfs->nxfs_exports)) {
		/* exported file system has no more exports */
		LIST_REMOVE(nxfs, nxfs_next);
		FREE(nxfs->nxfs_path, M_TEMP);
		FREE(nxfs, M_TEMP);
		if (mp) {
			vfs_clearflags(mp, MNT_EXPORTED);
		}
	}
out:
	if (mvp) {
		vnode_put(mvp);
		nameidone(&mnd);
	}
unlock_out:
	lck_rw_done(&nfsrv_export_rwlock);
	return error;
}
/*
 * Check if there is at least one export that will allow this address.
 *
 * Return 0 if there is an export that will allow this address,
 * else return EACCES.
 */
int
nfsrv_check_exports_allow_address(mbuf_t nam)
{
	struct nfs_exportfs *nxfs;
	struct nfs_export *nx;
	struct nfs_export_options *nxo = NULL;

	lck_rw_lock_shared(&nfsrv_export_rwlock);
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			/* A little optimizing by checking for the default first */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				nxo = &nx->nx_defopt;
			}
			if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) {
				goto found;
			}
		}
	}
found:
	lck_rw_done(&nfsrv_export_rwlock);

	return nxo ? 0 : EACCES;
}
struct nfs_export_options *
nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam)
{
	struct nfs_export_options *nxo = NULL;
	struct nfs_netopt *no = NULL;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	/* Lookup in the export list first. */
	if (nam != NULL) {
		saddr = mbuf_data(nam);
		if (saddr->sa_family > AF_MAX) {
			/* Bogus sockaddr?  Don't match anything. */
			return NULL;
		}
		rnh = nx->nx_rtable[saddr->sa_family];
		if (rnh != NULL) {
			no = (struct nfs_netopt *)
			    (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
			if (no && no->no_rnodes->rn_flags & RNF_ROOT) {
				no = NULL;
			}
			if (no) {
				nxo = &no->no_opt;
			}
		}
	}
	/* If no address match, use the default if it exists. */
	if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) {
		nxo = &nx->nx_defopt;
	}
	return nxo;
}
/* find an export for the given handle */
struct nfs_export *
nfsrv_fhtoexport(struct nfs_filehandle *nfhp)
{
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export *nx;
	uint32_t fsid, expid;

	if (!nfsrv_export_hashtbl) {
		return NULL;
	}
	fsid = ntohl(nxh->nxh_fsid);
	expid = ntohl(nxh->nxh_expid);
	nx = NFSRVEXPHASH(fsid, expid)->lh_first;
	for (; nx; nx = LIST_NEXT(nx, nx_hash)) {
		if (nx->nx_fs->nxfs_id != fsid) {
			continue;
		}
		if (nx->nx_id != expid) {
			continue;
		}
		break;
	}
	return nx;
}
struct nfsrv_getvfs_by_mntonname_callback_args {
	const char	*path;		/* IN */
	mount_t		mp;		/* OUT */
};

static int
nfsrv_getvfs_by_mntonname_callback(mount_t mp, void *v)
{
	struct nfsrv_getvfs_by_mntonname_callback_args * const args = v;
	char real_mntonname[MAXPATHLEN];
	int pathbuflen = MAXPATHLEN;
	vnode_t rvp;
	int error;

	error = VFS_ROOT(mp, &rvp, vfs_context_current());
	if (error) {
		goto out;
	}
	error = vn_getpath_ext(rvp, NULLVP, real_mntonname, &pathbuflen,
	    VN_GETPATH_FSENTER | VN_GETPATH_NO_FIRMLINK);
	vnode_put(rvp);
	if (error) {
		goto out;
	}
	if (strcmp(args->path, real_mntonname) == 0) {
		error = vfs_busy(mp, LK_NOWAIT);
		if (error == 0) {
			args->mp = mp;
		}
		return VFS_RETURNED_DONE;
	}
out:
	return VFS_RETURNED;
}

static mount_t
nfsrv_getvfs_by_mntonname(char *path)
{
	struct nfsrv_getvfs_by_mntonname_callback_args args = {
		.path = path,
		.mp = NULL,
	};
	mount_t mp;
	int error;

	mp = vfs_getvfs_by_mntonname(path);
	if (mp) {
		error = vfs_busy(mp, LK_NOWAIT);
		if (error) {
			mp = NULL;
		}
	} else if (vfs_iterate(0, nfsrv_getvfs_by_mntonname_callback,
	    &args) == 0) {
		mp = args.mp;
	}
	return mp;
}
/*
 * nfsrv_fhtovp() - convert FH to vnode and export info
 */
int
nfsrv_fhtovp(
	struct nfs_filehandle *nfhp,
	struct nfsrv_descript *nd,
	vnode_t *vpp,
	struct nfs_export **nxp,
	struct nfs_export_options **nxop)
{
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export_options *nxo;
	u_char *fidp;
	mount_t mp;
	mbuf_t nam = NULL;
	uint32_t v;
	int i, valid, error;

	*vpp = NULL;
	*nxp = NULL;
	*nxop = NULL;

	if (nd != NULL) {
		nam = nd->nd_nam;
	}

	v = ntohl(nxh->nxh_version);
	if (v != NFS_FH_VERSION) {
		/* file handle format not supported */
		return ESTALE;
	}
	if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) {
		return EBADRPC;
	}
	if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) {
		return ESTALE;
	}
	v = ntohs(nxh->nxh_flags);
	if (v & NXHF_INVALIDFH) {
		return ESTALE;
	}

	*nxp = nfsrv_fhtoexport(nfhp);
	if (!*nxp) {
		return ESTALE;
	}

	/* Get the export option structure for this <export, client> tuple. */
	*nxop = nxo = nfsrv_export_lookup(*nxp, nam);
	if (nam && (*nxop == NULL)) {
		return EACCES;
	}

	if (nd != NULL) {
		/* Validate the security flavor of the request */
		for (i = 0, valid = 0; i < nxo->nxo_sec.count; i++) {
			if (nd->nd_sec == nxo->nxo_sec.flavors[i]) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			/*
			 * RFC 2623 section 2.3.2 recommends no authentication
			 * requirement for certain NFS procedures used for mounting.
			 * This allows an unauthenticated superuser on the client
			 * to do mounts for the benefit of authenticated users.
			 */
			if (nd->nd_vers == NFS_VER2) {
				if (nd->nd_procnum == NFSV2PROC_GETATTR ||
				    nd->nd_procnum == NFSV2PROC_STATFS) {
					valid = 1;
				}
			}
			if (nd->nd_vers == NFS_VER3) {
				if (nd->nd_procnum == NFSPROC_FSINFO) {
					valid = 1;
				}
			}
			if (!valid) {
				return NFSERR_AUTHERR | AUTH_REJECTCRED;
			}
		}
	}

	if (nxo && (nxo->nxo_flags & NX_OFFLINE)) {
		return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
	}

	/* find mount structure */
	mp = nfsrv_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path);
	if (!mp) {
		/*
		 * We have an export, but no mount?
		 * Perhaps the export just hasn't been marked offline yet.
		 */
		return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER;
	}

	fidp = nfhp->nfh_fhp + sizeof(*nxh);
	error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL);
	vfs_unbusy(mp);
	if (error) {
		return error;
	}
	/* vnode pointer should be good at this point or ... */
	if (*vpp == NULL) {
		return ESTALE;
	}
	return 0;
}
/*
 * nfsrv_credcheck() - check/map credentials according
 * to given export options.
 */
int
nfsrv_credcheck(
	struct nfsrv_descript *nd,
	vfs_context_t ctx,
	__unused struct nfs_export *nx,
	struct nfs_export_options *nxo)
{
	if (nxo && nxo->nxo_cred) {
		if ((nxo->nxo_flags & NX_MAPALL) ||
		    ((nxo->nxo_flags & NX_MAPROOT) && !suser(nd->nd_cr, NULL))) {
			kauth_cred_ref(nxo->nxo_cred);
			kauth_cred_unref(&nd->nd_cr);
			nd->nd_cr = nxo->nxo_cred;
		}
	}
	ctx->vc_ucred = nd->nd_cr;
	return 0;
}
/*
 * nfsrv_vptofh() - convert vnode to file handle for given export
 *
 * If the caller is passing in a vnode for a ".." directory entry,
 * they can pass a directory NFS file handle (dnfhp) which will be
 * checked against the root export file handle.  If it matches, we
 * refuse to provide the file handle for the out-of-export directory.
 */
int
nfsrv_vptofh(
	struct nfs_export *nx,
	int nfsvers,
	struct nfs_filehandle *dnfhp,
	vnode_t vp,
	vfs_context_t ctx,
	struct nfs_filehandle *nfhp)
{
	int error;
	uint32_t maxfidsize;

	nfhp->nfh_fhp = (u_char *)&nfhp->nfh_xh;
	nfhp->nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
	nfhp->nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
	nfhp->nfh_xh.nxh_expid = htonl(nx->nx_id);
	nfhp->nfh_xh.nxh_flags = 0;
	nfhp->nfh_xh.nxh_reserved = 0;

	if (nfsvers == NFS_VER2) {
		bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE);
	}

	/* if directory FH matches export root, return invalid FH */
	if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) {
		if (nfsvers == NFS_VER2) {
			nfhp->nfh_len = NFSX_V2FH;
		} else {
			nfhp->nfh_len = sizeof(nfhp->nfh_xh);
		}
		nfhp->nfh_xh.nxh_fidlen = 0;
		nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH);
		return 0;
	}

	if (nfsvers == NFS_VER2) {
		maxfidsize = NFSV2_MAX_FID_SIZE;
	} else {
		maxfidsize = NFSV3_MAX_FID_SIZE;
	}
	nfhp->nfh_len = maxfidsize;

	error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx);
	if (error) {
		return error;
	}
	if (nfhp->nfh_len > maxfidsize) {
		return EOVERFLOW;
	}
	nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len;
	nfhp->nfh_len += sizeof(nfhp->nfh_xh);
	if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) {
		nfhp->nfh_len = NFSX_V2FH;
	}

	return 0;
}
/*
 * Compare two file handles to see if they're the same.
 * Note that we don't use nfh_len because that may include
 * padding in an NFSv2 file handle.
 */
int
nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2)
{
	struct nfs_exphandle *nxh1, *nxh2;
	int len1, len2;

	nxh1 = (struct nfs_exphandle *)fh1->nfh_fhp;
	nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp;
	len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen;
	len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen;
	if (len1 != len2) {
		return 0;
	}
	if (bcmp(nxh1, nxh2, len1)) {
		return 0;
	}
	return 1;
}
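/*
 * For reference (derived from the code above, not part of the original
 * source): the server-side file handle is a struct nfs_exphandle header
 * (version, fsid, export id, flags, reserved, fidlen) followed by the
 * file system's opaque fid bytes, with the whole thing padded out to
 * NFSX_V2FH for NFSv2 clients.  nfsrv_fhtoexport() uses the fsid/expid
 * words to find the export, and VFS_FHTOVP() consumes the trailing fid.
 */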
/*
 * Functions for dealing with active user lists
 */

/*
 * Search the hash table for a user node with a matching IP address and uid field.
 * If found, the node's tm_last timestamp is updated and the node is returned.
 *
 * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned.
 * Returns NULL if a new node could not be allocated OR saddr length exceeds sizeof(unode->sock).
 *
 * The list's user_mutex lock MUST be held.
 */
struct nfs_user_stat_node *
nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *saddr, uid_t uid)
{
	struct nfs_user_stat_node *unode;
	struct timeval now;
	struct nfs_user_stat_hashtbl_head *head;

	/* search the hash table */
	head = NFS_USER_STAT_HASH(list->user_hashtbl, uid);
	LIST_FOREACH(unode, head, hash_link) {
		if ((uid == unode->uid) && (nfs_sockaddr_cmp(saddr, (struct sockaddr *)&unode->sock) == 0)) {
			/* found matching node */
			break;
		}
	}

	if (unode) {
		/* found node in the hash table, now update lru position */
		TAILQ_REMOVE(&list->user_lru, unode, lru_link);
		TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);

		/* update time stamp */
		microtime(&now);
		unode->tm_last = (uint32_t)now.tv_sec;
		return unode;
	}

	if (saddr->sa_len > sizeof(((struct nfs_user_stat_node *)0)->sock)) {
		/* saddr length exceeds maximum value */
		return NULL;
	}

	if (list->node_count < nfsrv_user_stat_max_nodes) {
		/* Allocate a new node */
		MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node),
		    M_TEMP, M_WAITOK | M_ZERO);
		if (!unode) {
			return NULL;
		}
		/* increment node count */
		OSAddAtomic(1, &nfsrv_user_stat_node_count);
		list->node_count++;
	} else {
		/* reuse the oldest node in the lru list */
		unode = TAILQ_FIRST(&list->user_lru);
		if (!unode) {
			return NULL;
		}
		/* Remove the node */
		TAILQ_REMOVE(&list->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);
	}

	/* Initialize the node */
	unode->uid = uid;
	bcopy(saddr, &unode->sock, MIN(saddr->sa_len, sizeof(unode->sock)));
	microtime(&now);
	unode->ops = 0;
	unode->bytes_read = 0;
	unode->bytes_written = 0;
	unode->tm_start = (uint32_t)now.tv_sec;
	unode->tm_last = (uint32_t)now.tv_sec;

	/* insert the node */
	TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
	LIST_INSERT_HEAD(head, unode, hash_link);

	return unode;
}
void
nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes)
{
	struct nfs_user_stat_node *unode;
	struct nfs_active_user_list *ulist;
	struct sockaddr *saddr;

	if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) {
		return;
	}

	saddr = (struct sockaddr *)mbuf_data(nd->nd_nam);

	/* check address family before going any further */
	if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) {
		return;
	}

	ulist = &nx->nx_user_list;

	/* lock the active user list */
	lck_mtx_lock(&ulist->user_mutex);

	/* get the user node */
	unode = nfsrv_get_user_stat_node(ulist, saddr, uid);
	if (!unode) {
		lck_mtx_unlock(&ulist->user_mutex);
		return;
	}

	/* update counters */
	unode->ops += ops;
	unode->bytes_read += rd_bytes;
	unode->bytes_written += wr_bytes;

	/* done, unlock the active user list */
	lck_mtx_unlock(&ulist->user_mutex);
}
/* initialize an active user list */
void
nfsrv_init_user_list(struct nfs_active_user_list *ulist)
{
	uint32_t i;

	/* initialize the lru */
	TAILQ_INIT(&ulist->user_lru);

	/* initialize the hash table */
	for (i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) {
		LIST_INIT(&ulist->user_hashtbl[i]);
	}
	ulist->node_count = 0;

	lck_mtx_init(&ulist->user_mutex, nfsrv_active_user_mutex_group, LCK_ATTR_NULL);
}
/* Free all nodes in an active user list */
void
nfsrv_free_user_list(struct nfs_active_user_list *ulist)
{
	struct nfs_user_stat_node *unode;

	while ((unode = TAILQ_FIRST(&ulist->user_lru))) {
		/* Remove node and free */
		TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);

		/* decrement node count */
		OSAddAtomic(-1, &nfsrv_user_stat_node_count);
	}
	ulist->node_count = 0;

	lck_mtx_destroy(&ulist->user_mutex, nfsrv_active_user_mutex_group);
}
/* Reclaim old expired user nodes from active user lists. */
void
nfsrv_active_user_list_reclaim(void)
{
	struct nfs_exportfs *nxfs;
	struct nfs_export *nx;
	struct nfs_active_user_list *ulist;
	struct nfs_user_stat_hashtbl_head oldlist;
	struct nfs_user_stat_node *unode, *unode_next;
	struct timeval now;
	long tstale;

	LIST_INIT(&oldlist);

	lck_rw_lock_shared(&nfsrv_export_rwlock);
	microtime(&now);
	tstale = now.tv_sec - nfsrv_user_stat_max_idle_sec;
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			/* Scan through all user nodes of this export */
			ulist = &nx->nx_user_list;
			lck_mtx_lock(&ulist->user_mutex);
			for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
				unode_next = TAILQ_NEXT(unode, lru_link);

				/* check if this node has expired */
				if (unode->tm_last >= tstale) {
					break;
				}

				/* Remove node from the active user list */
				TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
				LIST_REMOVE(unode, hash_link);

				/* Add node to temp list */
				LIST_INSERT_HEAD(&oldlist, unode, hash_link);

				/* decrement node count */
				OSAddAtomic(-1, &nfsrv_user_stat_node_count);
				ulist->node_count--;
			}
			/* can unlock this export's list now */
			lck_mtx_unlock(&ulist->user_mutex);
		}
	}
	lck_rw_done(&nfsrv_export_rwlock);

	/* Free expired nodes */
	while ((unode = LIST_FIRST(&oldlist))) {
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);
	}
}
/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static u_char nfsrv_v2errmap[] = {
	NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR,
	NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO,
	NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE,
};
/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static short nfsv3err_null[] = { /* ... */ };
static short nfsv3err_getattr[] = { /* ... */ };
static short nfsv3err_setattr[] = { /* ... */ };
static short nfsv3err_lookup[] = { /* ... */ };
static short nfsv3err_access[] = { /* ... */ };
static short nfsv3err_readlink[] = { /* ... */ };
static short nfsv3err_read[] = { /* ... */ };
static short nfsv3err_write[] = { /* ... */ };
static short nfsv3err_create[] = { /* ... */ };
static short nfsv3err_mkdir[] = { /* ... */ };
static short nfsv3err_symlink[] = { /* ... */ };
static short nfsv3err_mknod[] = { /* ... */ };
static short nfsv3err_remove[] = { /* ... */ };
static short nfsv3err_rmdir[] = { /* ... */ };
static short nfsv3err_rename[] = { /* ... */ };
static short nfsv3err_link[] = { /* ... */ };
static short nfsv3err_readdir[] = { /* ... */ };
static short nfsv3err_readdirplus[] = { /* ... */ };
static short nfsv3err_fsstat[] = { /* ... */ };
static short nfsv3err_fsinfo[] = { /* ... */ };
static short nfsv3err_pathconf[] = { /* ... */ };
static short nfsv3err_commit[] = { /* ... */ };

static short *nfsrv_v3errmap[] = {
	nfsv3err_null,
	nfsv3err_getattr,
	nfsv3err_setattr,
	nfsv3err_lookup,
	nfsv3err_access,
	nfsv3err_readlink,
	nfsv3err_read,
	nfsv3err_write,
	nfsv3err_create,
	nfsv3err_mkdir,
	nfsv3err_symlink,
	nfsv3err_mknod,
	nfsv3err_remove,
	nfsv3err_rmdir,
	nfsv3err_rename,
	nfsv3err_link,
	nfsv3err_readdir,
	nfsv3err_readdirplus,
	nfsv3err_fsstat,
	nfsv3err_fsinfo,
	nfsv3err_pathconf,
	nfsv3err_commit,
};
/*
 * Map errnos to NFS error numbers.  For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
int
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
	short *defaulterrp, *errp;

	if (nd->nd_vers == NFS_VER2) {
		if (err <= (int)sizeof(nfsrv_v2errmap)) {
			return (int)nfsrv_v2errmap[err - 1];
		}
		return NFSERR_IO;
	}
	/* NFSv3 */
	if (nd->nd_procnum > NFSPROC_COMMIT) {
		return err & 0xffff;
	}
	errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
	while (*++errp) {
		if (*errp == err) {
			return err;
		} else if (*errp > err) {
			break;
		}
	}
	return (int)*defaulterrp;
}

#endif /* CONFIG_NFS_SERVER */

#endif /* CONFIG_NFS */