/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $
 */
/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/syscall.h>
#include <sys/ubc_internal.h>
#include <sys/fcntl.h>
#include <sys/uio_internal.h>
#include <sys/domain.h>
#include <libkern/OSAtomic.h>
#include <kern/thread_call.h>

#include <sys/vmparam.h>

#include <kern/clock.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfsnode.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_gss.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_lock.h>

#include <miscfs/specfs/specdev.h>

#include <netinet/in.h>
#include <net/kpi_interface.h>
struct nfsstats nfsstats;
size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0;
/*
 * functions to convert between NFS and VFS types
 */
vtonfs_type(enum vtype vtype, int nfsvers)

		if (nfsvers > NFS_VER2)

		if (nfsvers > NFS_VER2)

nfstov_type(nfstype nvtype, int nfsvers)

		if (nfsvers > NFS_VER2)

		if (nfsvers > NFS_VER2)

		if (nfsvers > NFS_VER3)

		if (nfsvers > NFS_VER3)

vtonfsv2_mode(enum vtype vtype, mode_t m)

		return vnode_makeimode(VCHR, m);
	return vnode_makeimode(vtype, m);
/*
 * Mapping of old NFS Version 2 RPC numbers to generic numbers.
 */
int nfsv3_procid[NFS_NPROCS] = {

#endif	/* NFSSERVER */

/*
 * and the reverse mapping from generic to Version 2 procedure numbers
 */
int nfsv2_procid[NFS_NPROCS] = {
/*
 * initialize NFS's cache of mbuf constants
 */
	nfs_mbuf_mhlen = ms.mhlen;
	nfs_mbuf_minclsize = ms.minclsize;
/*
 * allocate a list of mbufs to hold the given amount of data
 */
nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt)
	mbuf_t mhead, mlast, m;

	mhead = mlast = NULL;
		nfsm_mbuf_get(error, &m, (size - len));
		if (mlast && ((error = mbuf_setnext(mlast, m)))) {
		mlen = mbuf_maxlen(m);
		if ((len + mlen) > size)
		mbuf_setlen(m, mlen);
#endif	/* NFSSERVER */
/*
 * nfsm_chain_new_mbuf()
 *
 * Add a new mbuf to the given chain.
 */
nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint)
	if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS)
		sizehint = nfs_mbuf_minclsize;

	/* allocate a new mbuf */
	nfsm_mbuf_get(error, &mb, sizehint);
		panic("got NULL mbuf?");

	/* do we have a current mbuf? */
		/* first cap off current mbuf */
		mbuf_setlen(nmc->nmc_mcur, nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur));
		/* then append the new mbuf */
		error = mbuf_setnext(nmc->nmc_mcur, mb);

	/* set up for using the new mbuf */
	nmc->nmc_ptr = mbuf_data(mb);
	nmc->nmc_left = mbuf_trailingspace(mb);
/*
 * nfsm_chain_add_opaque_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
	uint32_t paddedlen, tlen;

	paddedlen = nfsm_rndup(len);
		if (!nmc->nmc_left) {
			error = nfsm_chain_new_mbuf(nmc, paddedlen);
		tlen = MIN(nmc->nmc_left, paddedlen);
			bcopy(buf, nmc->nmc_ptr, tlen);
			bzero(nmc->nmc_ptr, tlen);
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
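
/*
 * Rough illustration of the padding behavior above, assuming nfsm_rndup()
 * rounds its argument up to a multiple of 4 (the XDR alignment):
 *
 *	len = 5  ->  paddedlen = nfsm_rndup(5) = 8
 *	bytes copied from "buf":       5
 *	zero pad bytes added by bzero: 3
 *
 * so each opaque item always advances the chain by a multiple of 4 bytes.
 */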
/*
 * nfsm_chain_add_opaque_nopad_f()
 *
 * Add "len" bytes of opaque data pointed to by "buf" to the given chain.
 */
nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len)
		if (nmc->nmc_left <= 0) {
			error = nfsm_chain_new_mbuf(nmc, len);
		tlen = MIN(nmc->nmc_left, len);
		bcopy(buf, nmc->nmc_ptr, tlen);
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
/*
 * nfsm_chain_add_uio()
 *
 * Add "len" bytes of data from "uio" to the given chain.
 */
nfsm_chain_add_uio(struct nfsm_chain *nmc, struct uio *uiop, uint32_t len)
	uint32_t paddedlen, tlen;

	paddedlen = nfsm_rndup(len);
		if (!nmc->nmc_left) {
			error = nfsm_chain_new_mbuf(nmc, paddedlen);
		tlen = MIN(nmc->nmc_left, paddedlen);
			uiomove(nmc->nmc_ptr, tlen, uiop);
			bzero(nmc->nmc_ptr, tlen);
		nmc->nmc_ptr += tlen;
		nmc->nmc_left -= tlen;
/*
 * Find the length of the NFS mbuf chain
 * up to the current encoding/decoding offset.
 */
nfsm_chain_offset(struct nfsm_chain *nmc)
	for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) {
		if (mb == nmc->nmc_mcur)
			return (len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb)));
/*
 * nfsm_chain_advance()
 *
 * Advance an nfsm_chain by "len" bytes.
 */
nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len)
		if (nmc->nmc_left >= len) {
			nmc->nmc_left -= len;
		len -= nmc->nmc_left;
		nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur);
		nmc->nmc_ptr = mbuf_data(mb);
		nmc->nmc_left = mbuf_len(mb);
/*
 * nfsm_chain_reverse()
 *
 * Reverse decode offset in an nfsm_chain by "len" bytes.
 */
nfsm_chain_reverse(struct nfsm_chain *nmc, uint32_t len)
	uint32_t mlen, new_offset;

	mlen = nmc->nmc_ptr - (caddr_t) mbuf_data(nmc->nmc_mcur);
		nmc->nmc_left += len;

	new_offset = nfsm_chain_offset(nmc) - len;
	nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead);

	return (nfsm_chain_advance(nmc, new_offset));
/*
 * nfsm_chain_get_opaque_pointer_f()
 *
 * Return a pointer to the next "len" bytes of contiguous data in
 * the mbuf chain.  If the next "len" bytes are not contiguous, we
 * try to manipulate the mbuf chain so that it is.
 *
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr)
	uint32_t left, need, mblen, cplen, padlen;

	/* move to next mbuf with data */
	while (nmc->nmc_mcur && (nmc->nmc_left == 0)) {
		mb = mbuf_next(nmc->nmc_mcur);
		nmc->nmc_ptr = mbuf_data(mb);
		nmc->nmc_left = mbuf_len(mb);
	/* check if we've run out of data */

	/* do we already have a contiguous buffer? */
	if (nmc->nmc_left >= len) {
		/* the returned pointer will be the current pointer */
		*pptr = (u_char*)nmc->nmc_ptr;
		error = nfsm_chain_advance(nmc, nfsm_rndup(len));

	padlen = nfsm_rndup(len) - len;

	/* we need (len - left) more bytes */
	mbcur = nmc->nmc_mcur;
	left = nmc->nmc_left;

	if (need > mbuf_trailingspace(mbcur)) {
		/*
		 * The needed bytes won't fit in the current mbuf so we'll
		 * allocate a new mbuf to hold the contiguous range of data.
		 */
		nfsm_mbuf_get(error, &mb, len);

		/* double check that this mbuf can hold all the data */
		if (mbuf_maxlen(mb) < len) {

		/* the returned pointer will be the new mbuf's data pointer */
		*pptr = ptr = mbuf_data(mb);

		/* copy "left" bytes to the new mbuf */
		bcopy(nmc->nmc_ptr, ptr, left);
		mbuf_setlen(mb, left);

		/* insert the new mbuf between the current and next mbufs */
		error = mbuf_setnext(mb, mbuf_next(mbcur));
			error = mbuf_setnext(mbcur, mb);

		/* reduce current mbuf's length by "left" */
		mbuf_setlen(mbcur, mbuf_len(mbcur) - left);

		/*
		 * update nmc's state to point at the end of the mbuf
		 * where the needed data will be copied to.
		 */
		nmc->nmc_mcur = mbcur = mb;
		nmc->nmc_ptr = (caddr_t)ptr;

		/* The rest of the data will fit in this mbuf. */

		/* the returned pointer will be the current pointer */
		*pptr = (u_char*)nmc->nmc_ptr;

		/*
		 * update nmc's state to point at the end of the mbuf
		 * where the needed data will be copied to.
		 */
		nmc->nmc_ptr += left;

	/*
	 * move the next "need" bytes into the current
	 * mbuf from the mbufs that follow
	 */

	/* extend current mbuf length */
	mbuf_setlen(mbcur, mbuf_len(mbcur) + need);

	/* mb follows mbufs we're copying/compacting data from */
	mb = mbuf_next(mbcur);

	/* copy as much as we need/can */
		mblen = mbuf_len(mb);
		cplen = MIN(mblen, need);
			bcopy(ptr, nmc->nmc_ptr, cplen);
			/*
			 * update the mbuf's pointer and length to reflect that
			 * the data was shifted to an earlier mbuf in the chain
			 */
			error = mbuf_setdata(mb, ptr + cplen, mblen - cplen);
				mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

		/* update pointer/need */
		nmc->nmc_ptr += cplen;

		/* if more needed, go to next mbuf */

	/* did we run out of data in the mbuf chain? */
		mbuf_setlen(mbcur, mbuf_len(mbcur) - need);

	/*
	 * update nmc's state to point after this contiguous data
	 *
	 * "mb" points to the last mbuf we copied data from so we
	 * just set nmc to point at whatever remains in that mbuf.
	 */
	nmc->nmc_ptr = mbuf_data(mb);
	nmc->nmc_left = mbuf_len(mb);

	/* move past any padding */
		error = nfsm_chain_advance(nmc, padlen);
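
/*
 * Rough sketch of the two paths above: suppose a caller asks for len = 12
 * contiguous bytes but the current mbuf only has left = 4, so need = 8 more
 * bytes must be made contiguous.  If the current mbuf has at least 8 bytes
 * of trailing space, those bytes are pulled forward out of the following
 * mbufs by the compaction loop.  Otherwise a fresh mbuf large enough for
 * all 12 bytes is allocated, the 4 bytes already seen are copied into it,
 * and it is spliced into the chain before the compaction loop runs.  Either
 * way the returned pointer covers 12 contiguous bytes and the chain is then
 * advanced past any XDR padding.
 */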
/*
 * nfsm_chain_get_opaque_f()
 *
 * Read the next "len" bytes in the chain into "buf".
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf)
	uint32_t cplen, padlen;

	padlen = nfsm_rndup(len) - len;

	/* loop through mbufs copying all the data we need */
	while (len && nmc->nmc_mcur) {
		/* copy as much as we need/can */
		cplen = MIN(nmc->nmc_left, len);
			bcopy(nmc->nmc_ptr, buf, cplen);
			nmc->nmc_ptr += cplen;
			nmc->nmc_left -= cplen;
		/* if more needed, go to next mbuf */
			mbuf_t mb = mbuf_next(nmc->nmc_mcur);
			nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
			nmc->nmc_left = mb ? mbuf_len(mb) : 0;

	/* did we run out of data in the mbuf chain? */

		nfsm_chain_adv(error, nmc, padlen);
/*
 * nfsm_chain_get_uio()
 *
 * Read the next "len" bytes in the chain into the given uio.
 * The nfsm_chain is advanced by nfsm_rndup("len") bytes.
 */
nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, struct uio *uiop)
	uint32_t cplen, padlen;

	padlen = nfsm_rndup(len) - len;

	/* loop through mbufs copying all the data we need */
	while (len && nmc->nmc_mcur) {
		/* copy as much as we need/can */
		cplen = MIN(nmc->nmc_left, len);
			error = uiomove(nmc->nmc_ptr, cplen, uiop);
			nmc->nmc_ptr += cplen;
			nmc->nmc_left -= cplen;
		/* if more needed, go to next mbuf */
			mbuf_t mb = mbuf_next(nmc->nmc_mcur);
			nmc->nmc_ptr = mb ? mbuf_data(mb) : NULL;
			nmc->nmc_left = mb ? mbuf_len(mb) : 0;

	/* did we run out of data in the mbuf chain? */

		nfsm_chain_adv(error, nmc, padlen);
/*
 * Add an NFSv2 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_t szrdev)
	nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type,
		(VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600)));
	nfsm_chain_add_32(error, nmc,
		VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1);
	nfsm_chain_add_32(error, nmc,
		VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1);
	nfsm_chain_add_32(error, nmc, szrdev);
	nfsm_chain_add_v2time(error, nmc,
		VATTR_IS_ACTIVE(vap, va_access_time) ?
		&vap->va_access_time : NULL);
	nfsm_chain_add_v2time(error, nmc,
		VATTR_IS_ACTIVE(vap, va_modify_time) ?
		&vap->va_modify_time : NULL);
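
/*
 * The NFSv2 sattr emitted above is, in order: mode, uid, gid, a size/rdev
 * word (the szrdev argument here), then atime and mtime.  "Don't change"
 * is conveyed by all-ones values, which is why an inactive va_uid/va_gid
 * falls back to (uint32_t)-1 and an inactive va_mode falls back to a
 * default of 0600.
 */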
/*
 * Add an NFSv3 "sattr" structure to an mbuf chain
 */
nfsm_chain_add_v3sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap)
	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_mode);
		nfsm_chain_add_32(error, nmc, FALSE);
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_uid);
		nfsm_chain_add_32(error, nmc, FALSE);
	if (VATTR_IS_ACTIVE(vap, va_gid)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_32(error, nmc, vap->va_gid);
		nfsm_chain_add_32(error, nmc, FALSE);
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_64(error, nmc, vap->va_data_size);
		nfsm_chain_add_32(error, nmc, FALSE);
	if (vap->va_vaflags & VA_UTIMES_NULL) {
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_SERVER);
	if (VATTR_IS_ACTIVE(vap, va_access_time)) {
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
		nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_sec);
		nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec);
		nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
	if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
		nfsm_chain_add_32(error, nmc, NFS_TIME_SET_TO_CLIENT);
		nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_sec);
		nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec);
		nfsm_chain_add_32(error, nmc, NFS_TIME_DONT_CHANGE);
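
/*
 * Note on the layout built above: the NFSv3 sattr3 is a series of
 * discriminated unions.  Each of mode/uid/gid/size is preceded by a
 * TRUE/FALSE boolean saying whether a value follows, and each of
 * atime/mtime is preceded by one of NFS_TIME_DONT_CHANGE,
 * NFS_TIME_SET_TO_SERVER or NFS_TIME_SET_TO_CLIENT (a client-supplied
 * sec/nsec pair follows only in the last case).  VA_UTIMES_NULL maps both
 * times to "set to server time".
 */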
/*
 * nfsm_chain_get_fh_attr()
 *
 * Get the file handle and attributes from an mbuf chain. (NFSv2/v3)
 */
nfsm_chain_get_fh_attr(
	struct nfsm_chain *nmc,
	struct nfs_vattr *nvap)
	int error = 0, gotfh, gotattr;

	if (nfsvers == NFS_VER3)	/* check for file handle */
		nfsm_chain_get_32(error, nmc, gotfh);
	if (!error && gotfh)		/* get file handle */
		nfsm_chain_get_fh(error, nmc, nfsvers, fhp);
	if (nfsvers == NFS_VER3)	/* check for file attributes */
		nfsm_chain_get_32(error, nmc, gotattr);
		if (!gotfh)		/* skip attributes */
			nfsm_chain_adv(error, nmc, NFSX_V3FATTR);
		else			/* get attributes */
			error = nfs_parsefattr(nmc, nfsvers, nvap);
		/* we need valid attributes in order to call nfs_nget() */
		if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, ctx, nvap, xidp)) {
/*
 * Get and process NFSv3 WCC data from an mbuf chain
 */
nfsm_chain_get_wcc_data_f(
	struct nfsm_chain *nmc,
	struct timespec *premtime,
	nfsm_chain_get_32(error, nmc, flag);
	if (!error && flag) {
		nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
		nfsm_chain_get_32(error, nmc, premtime->tv_sec);
		nfsm_chain_get_32(error, nmc, premtime->tv_nsec);
		nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED);
		premtime->tv_sec = 0;
		premtime->tv_nsec = 0;
	nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp);
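
/*
 * The NFSv3 pre-op attributes decoded above are size (8 bytes, skipped),
 * mtime (sec/nsec, saved in *premtime) and ctime (8 bytes, skipped) --
 * hence the two nfsm_chain_adv() calls of 2 * NFSX_UNSIGNED bracketing the
 * two nfsm_chain_get_32() calls.  The post-op attributes that follow are
 * handled by nfsm_chain_postop_attr_update_flag().
 */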
/*
 * Build the RPC header and fill in the authorization info.
 * Returns the head of the mbuf list and the xid.
 */
	struct nfsmount *nmp = req->r_nmp;
	int nfsvers = nmp->nm_vers;
	int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum);
	int auth_type = (!auth_len && !req->r_cred) ? RPCAUTH_NULL : nmp->nm_auth;

	return nfsm_rpchead2(nmp->nm_sotype, NFS_PROG, nfsvers, proc,
			auth_type, auth_len, req->r_cred, req, mrest, xidp, mreqp);

nfsm_rpchead2(int sotype, int prog, int vers, int proc, int auth_type, int auth_len,
	kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp)
	int error, i, grpsiz, authsiz, reqlen;
	struct nfsm_chain nmreq;

	/* allocate the packet */
	authsiz = nfsm_rndup(auth_len);
	headlen = authsiz + 10 * NFSX_UNSIGNED;
	if (sotype == SOCK_STREAM) /* also include room for any RPC Record Mark */
		headlen += NFSX_UNSIGNED;
	if (headlen >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mreq);
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq);
			if (headlen < nfs_mbuf_mhlen)
				mbuf_align_32(mreq, headlen);
				mbuf_align_32(mreq, 8 * NFSX_UNSIGNED);
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */

	/*
	 * If the caller gave us a non-zero XID then use it because
	 * it may be a higher-level resend with a GSSAPI credential.
	 * Otherwise, allocate a new one.
	 */
		lck_mtx_lock(nfs_request_mutex);
			/*
			 * Derive initial xid from system time.
			 *
			 * Note: it's OK if this code inits nfs_xid to 0 (for example,
			 * due to a broken clock) because we immediately increment it
			 * and we guarantee to never use xid 0.  So, nfs_xid should only
			 * ever be 0 the first time this function is called.
			 */
			nfs_xid = tv.tv_sec << 12;
		if (++nfs_xid == 0) {
			/* Skip zero xid if it should ever happen. */
		*xidp = nfs_xid + ((u_int64_t)nfs_xidwrap << 32);
		lck_mtx_unlock(nfs_request_mutex);

	/* build the header(s) */
	nmreq.nmc_mcur = nmreq.nmc_mhead = mreq;
	nmreq.nmc_ptr = mbuf_data(nmreq.nmc_mcur);
	nmreq.nmc_left = mbuf_trailingspace(nmreq.nmc_mcur);

	/* First, if it's a TCP stream insert space for an RPC record mark */
	if (sotype == SOCK_STREAM)
		nfsm_chain_add_32(error, &nmreq, 0);

	/* Then the RPC header. */
	nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff));
	nfsm_chain_add_32(error, &nmreq, RPC_CALL);
	nfsm_chain_add_32(error, &nmreq, RPC_VER2);
	nfsm_chain_add_32(error, &nmreq, prog);
	nfsm_chain_add_32(error, &nmreq, vers);
	nfsm_chain_add_32(error, &nmreq, proc);

	switch (auth_type) {
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NULL);	/* auth */
		nfsm_chain_add_32(error, &nmreq, 0);		/* length */
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NULL);	/* verf */
		nfsm_chain_add_32(error, &nmreq, 0);		/* length */
		nfsm_chain_build_done(error, &nmreq);
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_UNIX);
		nfsm_chain_add_32(error, &nmreq, authsiz);
		nfsm_chain_add_32(error, &nmreq, 0);	/* stamp */
		nfsm_chain_add_32(error, &nmreq, 0);	/* zero-length hostname */
		nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred));	/* UID */
		nfsm_chain_add_32(error, &nmreq, cred->cr_groups[0]);		/* GID */
		grpsiz = (auth_len >> 2) - 5;
		nfsm_chain_add_32(error, &nmreq, grpsiz);/* additional GIDs */
		for (i = 1; i <= grpsiz; i++)
			nfsm_chain_add_32(error, &nmreq, cred->cr_groups[i]);

		/* And the verifier... */
		nfsm_chain_add_32(error, &nmreq, RPCAUTH_NULL);	/* flavor */
		nfsm_chain_add_32(error, &nmreq, 0);		/* length */
		nfsm_chain_build_done(error, &nmreq);

		/* Append the args mbufs */
		error = mbuf_setnext(nmreq.nmc_mcur, mrest);

		error = nfs_gss_clnt_cred_put(req, &nmreq, mrest);
		if (error == ENEEDAUTH) {
			/*
			 * Use sec=sys for this user
			 */
			auth_type = RPCAUTH_UNIX;

	/* finish setting up the packet */
	error = mbuf_pkthdr_setrcvif(mreq, 0);

	/* Calculate the size of the request */
	for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb))
		reqlen += mbuf_len(mb);

	mbuf_pkthdr_setlen(mreq, reqlen);

	/*
	 * If the request goes on a TCP stream,
	 * set its size in the RPC record mark.
	 * The record mark count doesn't include itself
	 * and the last fragment bit is set.
	 */
	if (sotype == SOCK_STREAM)
		nfsm_chain_set_recmark(error, &nmreq,
			(reqlen - NFSX_UNSIGNED) | 0x80000000);
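
/*
 * The call header built above follows the ONC RPC layout: xid,
 * msg_type=CALL, rpcvers=2, prog, vers, proc, followed by the credential
 * (AUTH_NULL or AUTH_UNIX here) and an AUTH_NULL verifier.  On a
 * SOCK_STREAM transport the first word is the RPC record mark; for a
 * request of, say, reqlen = 132 bytes the mark is
 *
 *	(132 - NFSX_UNSIGNED) | 0x80000000 = 128 | 0x80000000
 *
 * i.e. the fragment length excluding the mark itself, with the
 * last-fragment bit set.
 */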
/*
 * Parse an NFS file attribute structure out of an mbuf chain.
 */
nfs_parsefattr(struct nfsm_chain *nmc, int nfsvers, struct nfs_vattr *nvap)
	nfsm_chain_get_32(error, nmc, vtype);
	nfsm_chain_get_32(error, nmc, vmode);

	if (nfsvers == NFS_VER3) {
		nvap->nva_type = nfstov_type(vtype, nfsvers);
		/*
		 * The duplicate information returned in fa_type and fa_mode
		 * is an ambiguity in the NFS version 2 protocol.
		 *
		 * VREG should be taken literally as a regular file.  If a
		 * server intends to return some type information differently
		 * in the upper bits of the mode field (e.g. for sockets, or
		 * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
		 * leave the examination of the mode bits even in the VREG
		 * case to avoid breakage for bogus servers, but we make sure
		 * that there are actually type bits set in the upper part of
		 * fa_mode (and failing that, trust the va_type field).
		 *
		 * NFSv3 cleared the issue, and requires fa_mode to not
		 * contain any type information (while also introducing
		 * sockets and FIFOs for fa_type).
		 */
		vtype = nfstov_type(vtype, nfsvers);
		if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0)))
			vtype = IFTOVT(vmode);
		nvap->nva_type = vtype;

	nvap->nva_mode = (vmode & 07777);

	nfsm_chain_get_32(error, nmc, nvap->nva_nlink);
	nfsm_chain_get_32(error, nmc, nvap->nva_uid);
	nfsm_chain_get_32(error, nmc, nvap->nva_gid);

	if (nfsvers == NFS_VER3) {
		nfsm_chain_get_64(error, nmc, nvap->nva_size);
		nfsm_chain_get_64(error, nmc, nvap->nva_bytes);
		nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata1);
		nfsm_chain_get_32(error, nmc, nvap->nva_rawdev.specdata2);
		nfsm_chain_get_64(error, nmc, nvap->nva_fsid.major);
		nvap->nva_fsid.minor = 0;
		nfsm_chain_get_64(error, nmc, nvap->nva_fileid);
		nfsm_chain_get_32(error, nmc, nvap->nva_size);
		nfsm_chain_adv(error, nmc, NFSX_UNSIGNED);
		nfsm_chain_get_32(error, nmc, rdev);
		nvap->nva_rawdev.specdata1 = major(rdev);
		nvap->nva_rawdev.specdata2 = minor(rdev);
		nfsm_chain_get_32(error, nmc, val);	/* blocks */
		nvap->nva_bytes = val * NFS_FABLKSIZE;
		nfsm_chain_get_32(error, nmc, val);
		nvap->nva_fsid.major = (uint64_t)val;
		nvap->nva_fsid.minor = 0;
		nfsm_chain_get_32(error, nmc, val);
		nvap->nva_fileid = (uint64_t)val;
		/* Really ugly NFSv2 kludge. */
		if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff))
			nvap->nva_type = VFIFO;
	nfsm_chain_get_time(error, nmc, nfsvers,
		nvap->nva_timesec[NFSTIME_ACCESS],
		nvap->nva_timensec[NFSTIME_ACCESS]);
	nfsm_chain_get_time(error, nmc, nfsvers,
		nvap->nva_timesec[NFSTIME_MODIFY],
		nvap->nva_timensec[NFSTIME_MODIFY]);
	nfsm_chain_get_time(error, nmc, nfsvers,
		nvap->nva_timesec[NFSTIME_CHANGE],
		nvap->nva_timensec[NFSTIME_CHANGE]);
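
/*
 * Note on the two decode paths above: after the common
 * type/mode/nlink/uid/gid words, the v3 fattr3 carries 64-bit size and
 * bytes-used, a major/minor rawdev pair, a 64-bit fsid and a 64-bit
 * fileid, while the v2 fattr carries 32-bit size, blocksize (skipped),
 * rdev, blocks (scaled by NFS_FABLKSIZE into nva_bytes), fsid and fileid.
 * Both versions end with the atime/mtime/ctime pairs decoded by
 * nfsm_chain_get_time().
 */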
/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the value pointed to by nvap, unless the file type in the attribute
 * cache doesn't match the file type in the nvap, in which case log a
 * warning and return ESTALE.
 *
 * If the dontshrink flag is set, then it's not safe to call ubc_setsize()
 * to shrink the size of the file.
 */
	struct nfs_vattr *nvap,
	struct nfs_vattr *npnvap;

	if (np->n_hflag & NHINIT) {
		mp = vnode_mount(vp);

	FSDBG_TOP(527, np, vp, *xidp >> 32, *xidp);

	if (!VFSTONFS(mp)) {
		FSDBG_BOT(527, ENXIO, 1, 0, *xidp);

	if (*xidp < np->n_xid) {
		/*
		 * We have already updated attributes with a response from
		 * a later request.  The attributes we have here are probably
		 * stale so we drop them (just return).  However, our
		 * out-of-order receipt could be correct - if the requests were
		 * processed out of order at the server.  Given the uncertainty
		 * we invalidate our cached attributes.  *xidp is zeroed here
		 * to indicate the attributes were dropped - only getattr
		 * cares - it needs to retry the rpc.
		 */
		NATTRINVALIDATE(np);
		FSDBG_BOT(527, 0, np, np->n_xid, *xidp);

	if (vp && (nvap->nva_type != vnode_vtype(vp))) {
		/*
		 * The filehandle has changed type on us.  This can be
		 * caused by either the server not having unique filehandles
		 * or because another client has removed the previous
		 * filehandle and a new object (of a different type)
		 * has been created with the same filehandle.
		 *
		 * We can't simply switch the type on the vnode because
		 * there may be type-specific fields that need to be
		 * cleaned up or set up.
		 *
		 * So, what should we do with this vnode?
		 *
		 * About the best we can do is log a warning and return
		 * an error.  ESTALE is about the closest error, but it
		 * is a little strange that we come up with this error
		 * internally instead of simply passing it through from
		 * the server.  Hopefully, the vnode will be reclaimed
		 * soon so the filehandle can be reincarnated as the new
		 */
		printf("nfs loadattrcache vnode changed type, was %d now %d\n",
			vnode_vtype(vp), nvap->nva_type);
		FSDBG_BOT(527, ESTALE, 3, 0, *xidp);

	np->n_attrstamp = now.tv_sec;

	npnvap = &np->n_vattr;
	bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap));

	if (nvap->nva_size != np->n_size) {
		/*
		 * n_size is protected by the data lock, so we need to
		 * defer updating it until it's safe.  We save the new size
		 * and set a flag and it'll get updated the next time we get/drop
		 * the data lock or the next time we do a getattr.
		 */
		np->n_newsize = nvap->nva_size;
		FSDBG(527, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
		SET(np->n_flag, NUPDATESIZE);
		if (vp && (nvap->nva_type == VREG)) {
			if (!UBCINFOEXISTS(vp) || (dontshrink && (np->n_newsize < np->n_size))) {
				/* asked not to shrink, so stick with current size */
				FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001);
				nvap->nva_size = np->n_size;
				CLR(np->n_flag, NUPDATESIZE);
				NATTRINVALIDATE(np);
			} else if ((np->n_flag & NMODIFIED) && (nvap->nva_size < np->n_size)) {
				/* if we've modified, use larger size */
				FSDBG(527, np, np->n_size, np->n_vattr.nva_size, 0xf00d0002);
				nvap->nva_size = np->n_size;
				CLR(np->n_flag, NUPDATESIZE);

	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			nvap->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
			nvap->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
		if (np->n_flag & NUPD) {
			nvap->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
			nvap->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;

	FSDBG_BOT(527, 0, np, np->n_size, *xidp);
/*
 * Calculate the attribute timeout based on
 * how recently the file has been modified.
 */
nfs_attrcachetimeout(nfsnode_t np)
	struct nfsmount *nmp;

	if (!(nmp = NFSTONMP(np)))

	isdir = vnode_isdir(NFSTOV(np));

	if ((np)->n_flag & NMODIFIED)
		timeo = isdir ? nmp->nm_acdirmin : nmp->nm_acregmin;
		/* Note that if the client and server clocks are way out of sync, */
		/* timeout will probably get clamped to a min or max value */
		timeo = (now.tv_sec - (np)->n_mtime.tv_sec) / 10;

		if (timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
		if (timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
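
/*
 * For example, an unmodified regular file whose n_mtime is 50 seconds old
 * starts with timeo = 50 / 10 = 5 seconds, which is then clamped to the
 * [nm_acregmin, nm_acregmax] range (the acdirmin/acdirmax range for
 * directories), so recently-changed files are revalidated more often than
 * stable ones.
 */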
/*
 * Check the time stamp
 * If the cache is valid, copy contents to *nvaper and return 0
 * otherwise return an error
 */
nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int alreadylocked)
	struct nfs_vattr *nvap;
	struct timeval nowup;

	if (!alreadylocked && nfs_lock(np, NFS_NODE_LOCK_SHARED)) {
		FSDBG(528, np, 0, 0xffffff00, ENOENT);
		OSAddAtomic(1, (SInt32*)&nfsstats.attrcache_misses);

	if (!NATTRVALID(np)) {
		FSDBG(528, np, 0, 0xffffff01, ENOENT);
		OSAddAtomic(1, (SInt32*)&nfsstats.attrcache_misses);

	timeo = nfs_attrcachetimeout(np);

	microuptime(&nowup);
	if ((nowup.tv_sec - np->n_attrstamp) >= timeo) {
		FSDBG(528, np, 0, 0xffffff02, ENOENT);
		OSAddAtomic(1, (SInt32*)&nfsstats.attrcache_misses);

	nvap = &np->n_vattr;
	FSDBG(528, np, nvap->nva_size, np->n_size, 0xcace);
	OSAddAtomic(1, (SInt32*)&nfsstats.attrcache_hits);

	if (nvap->nva_size != np->n_size) {
		/*
		 * n_size is protected by the data lock, so we need to
		 * defer updating it until it's safe.  We save the new size
		 * and set a flag and it'll get updated the next time we get/drop
		 * the data lock or the next time we do a getattr.
		 */
		if (!alreadylocked) {
			/* need to upgrade shared lock to exclusive */
			if (lck_rw_lock_shared_to_exclusive(&np->n_lock) == FALSE)
				lck_rw_lock_exclusive(&np->n_lock);
		np->n_newsize = nvap->nva_size;
		FSDBG(528, np, nvap->nva_size, np->n_size, (nvap->nva_type == VREG) | (np->n_flag & NMODIFIED ? 6 : 4));
		SET(np->n_flag, NUPDATESIZE);
		if ((nvap->nva_type == VREG) && (np->n_flag & NMODIFIED) &&
		    (nvap->nva_size < np->n_size)) {
			/* if we've modified, use larger size */
			nvap->nva_size = np->n_size;
			CLR(np->n_flag, NUPDATESIZE);

	bcopy((caddr_t)nvap, (caddr_t)nvaper, sizeof(struct nfs_vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC) {
			nvaper->nva_timesec[NFSTIME_ACCESS] = np->n_atim.tv_sec;
			nvaper->nva_timensec[NFSTIME_ACCESS] = np->n_atim.tv_nsec;
		if (np->n_flag & NUPD) {
			nvaper->nva_timesec[NFSTIME_MODIFY] = np->n_mtim.tv_sec;
			nvaper->nva_timensec[NFSTIME_MODIFY] = np->n_mtim.tv_nsec;
static nfsuint64 nfs_nullcookie = { { 0, 0 } };
/*
 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.
 */
nfs_getcookie(nfsnode_t dnp, off_t off, int add)
	struct nfsdmap *dp, *dp2;

	pos = off / NFS_DIRBLKSIZ;
		return (&nfs_nullcookie);
	dp = dnp->n_cookies.lh_first;
		MALLOC_ZONE(dp, struct nfsdmap *, sizeof(struct nfsdmap),
				M_NFSDIROFF, M_WAITOK);
			return ((nfsuint64 *)0);
		dp->ndm_eocookie = 0;
		LIST_INSERT_HEAD(&dnp->n_cookies, dp, ndm_list);
		return ((nfsuint64 *)0);
	while (pos >= NFSNUMCOOKIES) {
		pos -= NFSNUMCOOKIES;
		if (dp->ndm_list.le_next) {
			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
			    pos >= dp->ndm_eocookie)
				return ((nfsuint64 *)0);
			dp = dp->ndm_list.le_next;
			MALLOC_ZONE(dp2, struct nfsdmap *, sizeof(struct nfsdmap),
					M_NFSDIROFF, M_WAITOK);
				return ((nfsuint64 *)0);
			dp2->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dp, dp2, ndm_list);
		return ((nfsuint64 *)0);
	if (pos >= dp->ndm_eocookie) {
		dp->ndm_eocookie = pos + 1;
		return ((nfsuint64 *)0);
	return (&dp->ndm_cookies[pos]);
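
/*
 * For example, a directory offset of off = 3 * NFS_DIRBLKSIZ gives pos = 3,
 * which (assuming NFSNUMCOOKIES is larger than 3) lives in slot 3 of the
 * first nfsdmap block.  Offsets past NFSNUMCOOKIES blocks walk -- or, when
 * "add" is set, extend -- the n_cookies list one nfsdmap at a time until
 * pos falls inside a block, while offset 0 maps to the all-zero
 * nfs_nullcookie.
 */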
/*
 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.
 */
nfs_invaldir(nfsnode_t dnp)
	if (vnode_vtype(NFSTOV(dnp)) != VDIR) {
		printf("nfs: invaldir not dir\n");
	dnp->n_direofoffset = 0;
	dnp->n_cookieverf.nfsuquad[0] = 0;
	dnp->n_cookieverf.nfsuquad[1] = 0;
	if (dnp->n_cookies.lh_first)
		dnp->n_cookies.lh_first->ndm_eocookie = 0;

#endif	/* NFSCLIENT */
/*
 * Schedule a callout thread to run an NFS timer function
 * interval milliseconds in the future.
 */
nfs_interval_timer_start(thread_call_t call, int interval)
	clock_interval_to_deadline(interval, 1000 * 1000, &deadline);
	thread_call_enter_delayed(call, deadline);

static void nfsrv_init_user_list(struct nfs_active_user_list *);
static void nfsrv_free_user_list(struct nfs_active_user_list *);
/*
 * add NFSv3 WCC data to an mbuf chain
 */
nfsm_chain_add_wcc_data_f(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *prevap,
	struct vnode_attr *postvap)
		nfsm_chain_add_32(error, nmc, FALSE);
		nfsm_chain_add_32(error, nmc, TRUE);
		nfsm_chain_add_64(error, nmc, prevap->va_data_size);
		nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_modify_time);
		nfsm_chain_add_time(error, nmc, NFS_VER3, &prevap->va_change_time);
	nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap);
/*
 * Extract a lookup path from the given mbufs and store it in
 * a newly allocated buffer saved in the given nameidata structure.
 */
nfsm_chain_get_path_namei(
	struct nfsm_chain *nmc,
	struct nameidata *nip)
	struct componentname *cnp = &nip->ni_cnd;

	if (len > (MAXPATHLEN - 1))
		return (ENAMETOOLONG);

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	cnp->cn_pnlen = MAXPATHLEN;
	cnp->cn_flags |= HASBUF;

	/* Copy the name from the mbuf list to the string */
	nfsm_chain_get_opaque(error, nmc, len, cp);
	cnp->cn_pnbuf[len] = '\0';

	/* sanity check the string */
	if ((strlen(cp) != len) || strchr(cp, '/'))
		FREE_ZONE(cnp->cn_pnbuf, MAXPATHLEN, M_NAMEI);
		cnp->cn_flags &= ~HASBUF;
	nip->ni_pathlen = len;
/*
 * Set up nameidata for a lookup() call and do it.
 */
	struct nfsrv_descript *nd,
	struct nameidata *nip,
	struct nfs_filehandle *nfhp,
	struct nfs_export **nxp,
	struct nfs_export_options **nxop)
	struct componentname *cnp = &nip->ni_cnd;

	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop);
	error = nfsrv_credcheck(nd, ctx, *nxp, *nxop);
	if (error || (vnode_vtype(dp) != VDIR)) {

	nip->ni_cnd.cn_context = ctx;

	if (*nxop && ((*nxop)->nxo_flags & NX_READONLY))
		cnp->cn_flags |= RDONLY;

	cnp->cn_flags |= NOCROSSMOUNT;
	cnp->cn_nameptr = cnp->cn_pnbuf;
	nip->ni_usedvp = nip->ni_startdir = dp;

	/*
	 * And call lookup() to do the real work
	 */
	error = lookup(nip);

	/* Check for encountering a symbolic link */
	if (cnp->cn_flags & ISSYMLINK) {
		if ((cnp->cn_flags & FSNODELOCKHELD)) {
			cnp->cn_flags &= ~FSNODELOCKHELD;
			unlock_fsnode(nip->ni_dvp, NULL);
		if (cnp->cn_flags & (LOCKPARENT | WANTPARENT))
			vnode_put(nip->ni_dvp);
		vnode_put(nip->ni_vp);

	tmppn = cnp->cn_pnbuf;
	cnp->cn_pnbuf = NULL;
	cnp->cn_flags &= ~HASBUF;
	FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI);
/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
nfsm_adj(mbuf_t mp, int len, int nul)
	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
		mnext = mbuf_next(m);
		mbuf_setlen(m, mlen);
			cp = (caddr_t)mbuf_data(m) + mlen - nul;
			for (i = 0; i < nul; i++)
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = mbuf_next(m)) {
		if (mlen >= count) {
			mbuf_setlen(m, count);
				cp = (caddr_t)mbuf_data(m) + mlen - nul;
				for (i = 0; i < nul; i++)
	for (m = mbuf_next(m); m; m = mbuf_next(m))
/*
 * Trim the header out of the mbuf list and trim off any trailing
 * junk so that the mbuf list has only the write data.
 */
nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen)
	int cnt = 0, dlen, adjust;

	for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m))

	/* trim current mbuf */
	data = mbuf_data(m);
	adjust = nmc->nmc_ptr - data;
	if ((dlen > 0) && (adjust > 0)) {
		if (mbuf_setdata(m, nmc->nmc_ptr, dlen))
		mbuf_setlen(m, dlen);

	/* skip next len bytes */
	for (; m && (cnt < len); m = mbuf_next(m)) {
			/* truncate to end of data */
			mbuf_setlen(m, dlen - (cnt - len));
			if (m == nmc->nmc_mcur)
				nmc->nmc_left -= (cnt - len);

	/* trim any trailing data */
	if (m == nmc->nmc_mcur)
	for (; m; m = mbuf_next(m))
nfsm_chain_add_fattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
	// XXX Should we assert here that all fields are supported?

	nfsm_chain_add_32(error, nmc, vtonfs_type(vap->va_type, nd->nd_vers));
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_32(error, nmc, vap->va_mode & 07777);
		nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, vap->va_mode));
	nfsm_chain_add_32(error, nmc, vap->va_nlink);
	nfsm_chain_add_32(error, nmc, vap->va_uid);
	nfsm_chain_add_32(error, nmc, vap->va_gid);
	if (nd->nd_vers == NFS_VER3) {
		nfsm_chain_add_64(error, nmc, vap->va_data_size);
		nfsm_chain_add_64(error, nmc, vap->va_data_alloc);
		nfsm_chain_add_32(error, nmc, major(vap->va_rdev));
		nfsm_chain_add_32(error, nmc, minor(vap->va_rdev));
		nfsm_chain_add_64(error, nmc, vap->va_fsid);
		nfsm_chain_add_64(error, nmc, vap->va_fileid);
		nfsm_chain_add_32(error, nmc, vap->va_data_size);
		nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE);
		if (vap->va_type == VFIFO)
			nfsm_chain_add_32(error, nmc, 0xffffffff);
			nfsm_chain_add_32(error, nmc, vap->va_rdev);
		nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE);
		nfsm_chain_add_32(error, nmc, vap->va_fsid);
		nfsm_chain_add_32(error, nmc, vap->va_fileid);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_access_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time);
	nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time);
nfsm_chain_get_sattr(
	struct nfsrv_descript *nd,
	struct nfsm_chain *nmc,
	struct vnode_attr *vap)
	int error = 0, nullflag = 0;
	struct timespec now;

	if (nd->nd_vers == NFS_VER2) {
		/*
		 * There is/was a bug in the Sun client that puts 0xffff in the mode
		 * field of sattr when it should put in 0xffffffff. The u_short
		 * doesn't sign extend. So check the low order 2 bytes for 0xffff.
		 */
		nfsm_chain_get_32(error, nmc, val);
		if ((val & 0xffff) != 0xffff) {
			VATTR_SET(vap, va_mode, val & 07777);
			/* save the "type" bits for NFSv2 create */
			VATTR_SET(vap, va_type, IFTOVT(val));
			VATTR_CLEAR_ACTIVE(vap, va_type);
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1)
			VATTR_SET(vap, va_uid, val);
		nfsm_chain_get_32(error, nmc, val);
		if (val != (uint32_t)-1)
			VATTR_SET(vap, va_gid, val);
		/* save the "size" bits for NFSv2 create (even if they appear unset) */
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_data_size, val);
		if (val == (uint32_t)-1)
			VATTR_CLEAR_ACTIVE(vap, va_data_size);
		nfsm_chain_get_time(error, nmc, NFS_VER2,
			vap->va_access_time.tv_sec,
			vap->va_access_time.tv_nsec);
		if (vap->va_access_time.tv_sec != -1)
			VATTR_SET_ACTIVE(vap, va_access_time);
		nfsm_chain_get_time(error, nmc, NFS_VER2,
			vap->va_modify_time.tv_sec,
			vap->va_modify_time.tv_nsec);
		if (vap->va_modify_time.tv_sec != -1)
			VATTR_SET_ACTIVE(vap, va_modify_time);

	nfsm_chain_get_32(error, nmc, val);
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_mode, val & 07777);
	nfsm_chain_get_32(error, nmc, val);
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_uid, val);
	nfsm_chain_get_32(error, nmc, val);
		nfsm_chain_get_32(error, nmc, val);
		VATTR_SET(vap, va_gid, val);
	nfsm_chain_get_32(error, nmc, val);
		nfsm_chain_get_64(error, nmc, val64);
		VATTR_SET(vap, va_data_size, val64);

	nfsm_chain_get_32(error, nmc, val);
	case NFS_TIME_SET_TO_CLIENT:
		nfsm_chain_get_time(error, nmc, nd->nd_vers,
			vap->va_access_time.tv_sec,
			vap->va_access_time.tv_nsec);
		VATTR_SET_ACTIVE(vap, va_access_time);
	case NFS_TIME_SET_TO_SERVER:
		VATTR_SET(vap, va_access_time, now);
		nullflag = VA_UTIMES_NULL;
	nfsm_chain_get_32(error, nmc, val);
	case NFS_TIME_SET_TO_CLIENT:
		nfsm_chain_get_time(error, nmc, nd->nd_vers,
			vap->va_modify_time.tv_sec,
			vap->va_modify_time.tv_nsec);
		VATTR_SET_ACTIVE(vap, va_modify_time);
	case NFS_TIME_SET_TO_SERVER:
		VATTR_SET(vap, va_modify_time, now);
		vap->va_vaflags |= nullflag;
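
/*
 * The Sun-client workaround above means an NFSv2 mode word of 0x0000ffff
 * is treated the same as 0xffffffff, i.e. "don't set the mode", because
 * the buggy client's u_short -1 did not sign extend.  Any other value has
 * its low 12 bits taken as va_mode, with the type bits remembered (but
 * left inactive) for use by NFSv2 create.
 */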
/*
 * Compare two security flavor structs
 */
nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2)
	if (sf1->count != sf2->count)
	for (i = 0; i < sf1->count; i++)
		if (sf1->flavors[i] != sf2->flavors[i])
/*
 * Build hash lists of net addresses and hang them off the NFS export.
 * Called by nfsrv_export() to set up the lists of export addresses.
 */
nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
	struct nfs_export_net_args nxna;
	struct nfs_netopt *no, *rn_no;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask;
	struct ucred temp_cred;

	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));

		if (nxna.nxna_flags & (NX_MAPROOT|NX_MAPALL)) {
			bzero(&temp_cred, sizeof(temp_cred));
			temp_cred.cr_uid = nxna.nxna_cred.cr_uid;
			temp_cred.cr_ngroups = nxna.nxna_cred.cr_ngroups;
			for (i=0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++)
				temp_cred.cr_groups[i] = nxna.nxna_cred.cr_groups[i];
			cred = kauth_cred_create(&temp_cred);
			if (!IS_VALID_CRED(cred))

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				if (IS_VALID_CRED(cred))
					kauth_cred_unref(&cred);
			nx->nx_flags |= NX_DEFAULTEXPORT;
			nx->nx_defopt.nxo_flags = nxna.nxna_flags;
			nx->nx_defopt.nxo_cred = cred;
			bcopy(&nxna.nxna_sec, &nx->nx_defopt.nxo_sec, sizeof(struct nfs_sec));

		i = sizeof(struct nfs_netopt);
		i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len;
		MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK);
			if (IS_VALID_CRED(cred))
				kauth_cred_unref(&cred);
		bzero(no, sizeof(struct nfs_netopt));
		no->no_opt.nxo_flags = nxna.nxna_flags;
		no->no_opt.nxo_cred = cred;
		bcopy(&nxna.nxna_sec, &no->no_opt.nxo_sec, sizeof(struct nfs_sec));

		saddr = (struct sockaddr *)(no + 1);
		bcopy(&nxna.nxna_addr, saddr, nxna.nxna_addr.ss_len);
		if (nxna.nxna_mask.ss_len) {
			smask = (struct sockaddr *)((caddr_t)saddr + nxna.nxna_addr.ss_len);
			bcopy(&nxna.nxna_mask, smask, nxna.nxna_mask.ss_len);

		i = saddr->sa_family;
		if ((rnh = nx->nx_rtable[i]) == 0) {
			/*
			 * Seems silly to initialize every AF when most are not
			 * used, do so on demand here
			 */
			for (dom = domains; dom; dom = dom->dom_next)
				if (dom->dom_family == i && dom->dom_rtattach) {
					dom->dom_rtattach((void **)&nx->nx_rtable[i],
			if ((rnh = nx->nx_rtable[i]) == 0) {
				if (IS_VALID_CRED(cred))
					kauth_cred_unref(&cred);
				_FREE(no, M_NETADDR);

		rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes);
			/*
			 * One of the reasons that rnh_addaddr may fail is that
			 * the entry already exists. To check for this case, we
			 * look up the entry to see if it is there. If so, we
			 * do not need to make a new entry but do continue.
			 *
			 * XXX should this be rnh_lookup() instead?
			 */
			rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
			rn_no = (struct nfs_netopt *)rn;
			if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 &&
			    (rn_no->no_opt.nxo_flags == nxna.nxna_flags) &&
			    (!nfsrv_cmp_secflavs(&rn_no->no_opt.nxo_sec, &nxna.nxna_sec))) {
				kauth_cred_t cred2 = rn_no->no_opt.nxo_cred;
				if (cred == cred2) {
					/* creds are same (or both NULL) */
				} else if (cred && cred2 && (cred->cr_uid == cred2->cr_uid) &&
				    (cred->cr_ngroups == cred2->cr_ngroups)) {
					for (i=0; i < cred2->cr_ngroups && i < NGROUPS; i++)
						if (cred->cr_groups[i] != cred2->cr_groups[i])
					if (i >= cred2->cr_ngroups || i >= NGROUPS)
			if (IS_VALID_CRED(cred))
				kauth_cred_unref(&cred);
			_FREE(no, M_NETADDR);
/*
 * In order to properly track an export's netopt count, we need to pass
 * an additional argument to nfsrv_free_netopt() so that it can decrement
 * the export's netopt count.
 */
struct nfsrv_free_netopt_arg {
	struct radix_node_head *rnh;

nfsrv_free_netopt(struct radix_node *rn, void *w)
	struct nfsrv_free_netopt_arg *fna = (struct nfsrv_free_netopt_arg *)w;
	struct radix_node_head *rnh = fna->rnh;
	uint32_t *cnt = fna->cnt;
	struct nfs_netopt *nno = (struct nfs_netopt *)rn;

	(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
	if (IS_VALID_CRED(nno->no_opt.nxo_cred))
		kauth_cred_unref(&nno->no_opt.nxo_cred);
	_FREE((caddr_t)rn, M_NETADDR);
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa)
	struct nfs_export_net_args nxna;
	struct radix_node_head *rnh;
	struct radix_node *rn;
	struct nfsrv_free_netopt_arg fna;
	struct nfs_netopt *nno;

	if (!unxa || !unxa->nxa_netcount) {
		/* delete everything */
		for (i = 0; i <= AF_MAX; i++)
			if ( (rnh = nx->nx_rtable[i]) ) {
				fna.cnt = &nx->nx_expcnt;
				(*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna);
				_FREE((caddr_t)rnh, M_RTABLE);
				nx->nx_rtable[i] = 0;

	/* delete only the exports specified */
	uaddr = unxa->nxa_nets;
	for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) {
		error = copyin(uaddr, &nxna, sizeof(nxna));

		if (nxna.nxna_addr.ss_len == 0) {
			/* No address means this is a default/world export */
			if (nx->nx_flags & NX_DEFAULTEXPORT) {
				nx->nx_flags &= ~NX_DEFAULTEXPORT;
				if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
					kauth_cred_unref(&nx->nx_defopt.nxo_cred);

		if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) {
			/* AF not initialized? */
			if (!(unxa->nxa_flags & NXA_ADD))
				printf("nfsrv_free_addrlist: address not found (0)\n");

		rn = (*rnh->rnh_lookup)(&nxna.nxna_addr,
				nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh);
		if (!rn || (rn->rn_flags & RNF_ROOT)) {
			if (!(unxa->nxa_flags & NXA_ADD))
				printf("nfsrv_free_addrlist: address not found (1)\n");

		(*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh);
		nno = (struct nfs_netopt *)rn;
		if (IS_VALID_CRED(nno->no_opt.nxo_cred))
			kauth_cred_unref(&nno->no_opt.nxo_cred);
		_FREE((caddr_t)rn, M_NETADDR);

		if (nx->nx_expcnt == ((nx->nx_flags & NX_DEFAULTEXPORT) ? 1 : 0)) {
			/* no more entries in rnh, so free it up */
			_FREE((caddr_t)rnh, M_RTABLE);
			nx->nx_rtable[nxna.nxna_addr.ss_family] = 0;
2214 void enablequotas(struct mount
*mp
, vfs_context_t ctx
); // XXX
2217 nfsrv_export(struct user_nfs_export_args
*unxa
, vfs_context_t ctx
)
2219 int error
= 0, pathlen
;
2220 struct nfs_exportfs
*nxfs
, *nxfs2
, *nxfs3
;
2221 struct nfs_export
*nx
, *nx2
, *nx3
;
2222 struct nfs_filehandle nfh
;
2223 struct nameidata mnd
, xnd
;
2224 vnode_t mvp
= NULL
, xvp
= NULL
;
2226 char path
[MAXPATHLEN
];
2229 if (unxa
->nxa_flags
& NXA_DELETE_ALL
) {
2230 /* delete all exports on all file systems */
2231 lck_rw_lock_exclusive(&nfsrv_export_rwlock
);
2232 while ((nxfs
= LIST_FIRST(&nfsrv_exports
))) {
2233 mp
= vfs_getvfs_by_mntonname(nxfs
->nxfs_path
);
2235 vfs_clearflags(mp
, MNT_EXPORTED
);
2236 /* delete all exports on this file system */
2237 while ((nx
= LIST_FIRST(&nxfs
->nxfs_exports
))) {
2238 LIST_REMOVE(nx
, nx_next
);
2239 LIST_REMOVE(nx
, nx_hash
);
2240 /* delete all netopts for this export */
2241 nfsrv_free_addrlist(nx
, NULL
);
2242 nx
->nx_flags
&= ~NX_DEFAULTEXPORT
;
2243 if (IS_VALID_CRED(nx
->nx_defopt
.nxo_cred
)) {
2244 kauth_cred_unref(&nx
->nx_defopt
.nxo_cred
);
2246 /* free active user list for this export */
2247 nfsrv_free_user_list(&nx
->nx_user_list
);
2248 FREE(nx
->nx_path
, M_TEMP
);
2251 LIST_REMOVE(nxfs
, nxfs_next
);
2252 FREE(nxfs
->nxfs_path
, M_TEMP
);
2255 lck_rw_done(&nfsrv_export_rwlock
);
2259 error
= copyinstr(unxa
->nxa_fspath
, path
, MAXPATHLEN
, (size_t *)&pathlen
);
2263 lck_rw_lock_exclusive(&nfsrv_export_rwlock
);
2265 // first check if we've already got an exportfs with the given ID
2266 LIST_FOREACH(nxfs
, &nfsrv_exports
, nxfs_next
) {
2267 if (nxfs
->nxfs_id
== unxa
->nxa_fsid
)
2271 /* verify exported FS path matches given path */
2272 if (strncmp(path
, nxfs
->nxfs_path
, MAXPATHLEN
)) {
2276 if ((unxa
->nxa_flags
& (NXA_ADD
|NXA_OFFLINE
)) == NXA_ADD
) {
2277 /* if adding, verify that the mount is still what we expect */
2278 mp
= vfs_getvfs_by_mntonname(nxfs
->nxfs_path
);
2279 /* find exported FS root vnode */
2280 NDINIT(&mnd
, LOOKUP
, FOLLOW
| LOCKLEAF
| AUDITVNPATH1
,
2281 UIO_SYSSPACE
, CAST_USER_ADDR_T(nxfs
->nxfs_path
), ctx
);
2282 error
= namei(&mnd
);
2286 /* make sure it's (still) the root of a file system */
2287 if (!vnode_isvroot(mvp
)) {
2291 /* sanity check: this should be same mount */
2292 if (mp
!= vnode_mount(mvp
)) {
2298 /* no current exported file system with that ID */
2299 if (!(unxa
->nxa_flags
& NXA_ADD
)) {
2304 /* find exported FS root vnode */
2305 NDINIT(&mnd
, LOOKUP
, FOLLOW
| LOCKLEAF
| AUDITVNPATH1
,
2306 UIO_SYSSPACE
, CAST_USER_ADDR_T(path
), ctx
);
2307 error
= namei(&mnd
);
2309 if (!(unxa
->nxa_flags
& NXA_OFFLINE
))
2313 /* make sure it's the root of a file system */
2314 if (!vnode_isvroot(mvp
)) {
2315 /* bail if not marked offline */
2316 if (!(unxa
->nxa_flags
& NXA_OFFLINE
)) {
2324 mp
= vnode_mount(mvp
);
2326 /* make sure the file system is NFS-exportable */
2327 nfh
.nfh_len
= NFSV3_MAX_FID_SIZE
;
2328 error
= VFS_VPTOFH(mvp
, (int*)&nfh
.nfh_len
, &nfh
.nfh_fid
[0], NULL
);
2329 if (!error
&& (nfh
.nfh_len
> (int)NFSV3_MAX_FID_SIZE
))
2336 /* add an exportfs for it */
2337 MALLOC(nxfs
, struct nfs_exportfs
*, sizeof(struct nfs_exportfs
), M_TEMP
, M_WAITOK
);
2342 bzero(nxfs
, sizeof(struct nfs_exportfs
));
2343 nxfs
->nxfs_id
= unxa
->nxa_fsid
;
2344 MALLOC(nxfs
->nxfs_path
, char*, pathlen
, M_TEMP
, M_WAITOK
);
2345 if (!nxfs
->nxfs_path
) {
2350 bcopy(path
, nxfs
->nxfs_path
, pathlen
);
2351 /* insert into list in reverse-sorted order */
2353 LIST_FOREACH(nxfs2
, &nfsrv_exports
, nxfs_next
) {
2354 if (strncmp(nxfs
->nxfs_path
, nxfs2
->nxfs_path
, MAXPATHLEN
) > 0)
2359 LIST_INSERT_BEFORE(nxfs2
, nxfs
, nxfs_next
);
2361 LIST_INSERT_AFTER(nxfs3
, nxfs
, nxfs_next
);
2363 LIST_INSERT_HEAD(&nfsrv_exports
, nxfs
, nxfs_next
);
	/* make sure any quotas are enabled before we export the file system */
	enablequotas(mp, ctx);

	if (unxa->nxa_exppath) {
		error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, (size_t *)&pathlen);
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			if (nx->nx_id == unxa->nxa_expid)
		/* verify exported FS path matches given path */
		if (strncmp(path, nx->nx_path, MAXPATHLEN)) {
		/* no current export with that ID */
		if (!(unxa->nxa_flags & NXA_ADD)) {
		/* add an export for it */
		MALLOC(nx, struct nfs_export *, sizeof(struct nfs_export), M_TEMP, M_WAITOK);
		bzero(nx, sizeof(struct nfs_export));
		nx->nx_id = unxa->nxa_expid;
		microtime(&nx->nx_exptime);
		MALLOC(nx->nx_path, char*, pathlen, M_TEMP, M_WAITOK);
		bcopy(path, nx->nx_path, pathlen);
		/* initialize the active user list */
		nfsrv_init_user_list(&nx->nx_user_list);
		/* insert into list in reverse-sorted order */
		LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
			if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0)
		LIST_INSERT_BEFORE(nx2, nx, nx_next);
		LIST_INSERT_AFTER(nx3, nx, nx_next);
		LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next);
		/* insert into hash */
		LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash);

		/*
		 * We don't allow/support nested exports.  Check if the new entry
		 * nests with the entries before and after or if there's an
		 * entry for the file system root and subdirs.
		 */
		if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) &&
		     (nx3->nx_path[pathlen-1] == '/')) ||
		    (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) &&
		     (nx->nx_path[strlen(nx2->nx_path)] == '/')))

		/* check export conflict with fs root export and vice versa */
		expisroot = !nx->nx_path[0] ||
			    ((nx->nx_path[0] == '.') && !nx->nx_path[1]);
		LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) {
			} else if (!nx2->nx_path[0])
			else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1])

		/*
		 * Don't actually return an error because mountd is
		 * probably about to delete the conflicting export.
		 * This can happen when a new export momentarily conflicts
		 * with an old export while the transition is being made.
		 * Theoretically, mountd could be written to avoid this
		 * transient situation - but it would greatly increase the
		 * complexity of mountd for very little overall benefit.
		 */
		printf("nfsrv_export: warning: nested exports: %s/%s\n",
			nxfs->nxfs_path, nx->nx_path);
		nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;

		/* make sure file handle is set up */
		if ((nx->nx_fh.nfh_xh.nxh_version != htonl(NFS_FH_VERSION)) ||
		    (nx->nx_fh.nfh_xh.nxh_flags & NXHF_INVALIDFH)) {
			/* try to set up export root file handle */
			nx->nx_fh.nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
			nx->nx_fh.nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
			nx->nx_fh.nfh_xh.nxh_expid = htonl(nx->nx_id);
			nx->nx_fh.nfh_xh.nxh_flags = 0;
			nx->nx_fh.nfh_xh.nxh_reserved = 0;
			nx->nx_fh.nfh_fhp = (u_char*)&nx->nx_fh.nfh_xh;
			bzero(&nx->nx_fh.nfh_fid[0], NFSV2_MAX_FID_SIZE);

			/* find export root vnode */
			if (!nx->nx_path[0] || ((nx->nx_path[0] == '.') && !nx->nx_path[1])) {
				/* exporting file system's root directory */

			xnd.ni_cnd.cn_nameiop = LOOKUP;
			xnd.ni_cnd.cn_flags = LOCKLEAF;
			xnd.ni_pathlen = pathlen - 1;
			xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path;
			xnd.ni_startdir = mvp;
			xnd.ni_usedvp = mvp;
			xnd.ni_cnd.cn_context = ctx;
			error = lookup(&xnd);

			if (vnode_vtype(xvp) != VDIR) {

			/* grab file handle */
			nx->nx_fh.nfh_len = NFSV3_MAX_FID_SIZE;
			error = VFS_VPTOFH(xvp, (int*)&nx->nx_fh.nfh_len, &nx->nx_fh.nfh_fid[0], NULL);
			if (!error && (nx->nx_fh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) {
			nx->nx_fh.nfh_xh.nxh_fidlen = nx->nx_fh.nfh_len;
			nx->nx_fh.nfh_len += sizeof(nx->nx_fh.nfh_xh);

			nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH;
			nx->nx_fh.nfh_xh.nxh_fidlen = 0;
			nx->nx_fh.nfh_len = sizeof(nx->nx_fh.nfh_xh);

	/* perform the export changes */
	if (unxa->nxa_flags & NXA_DELETE) {
		/* delete all exports on this file system */
		while ((nx = LIST_FIRST(&nxfs->nxfs_exports))) {
			LIST_REMOVE(nx, nx_next);
			LIST_REMOVE(nx, nx_hash);
			/* delete all netopts for this export */
			nfsrv_free_addrlist(nx, NULL);
			nx->nx_flags &= ~NX_DEFAULTEXPORT;
			if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
				kauth_cred_unref(&nx->nx_defopt.nxo_cred);
			/* delete active user list for this export */
			nfsrv_free_user_list(&nx->nx_user_list);
			FREE(nx->nx_path, M_TEMP);
		} else if (!unxa->nxa_netcount) {
			/* delete all netopts for this export */
			nfsrv_free_addrlist(nx, NULL);
			nx->nx_flags &= ~NX_DEFAULTEXPORT;
			if (IS_VALID_CRED(nx->nx_defopt.nxo_cred)) {
				kauth_cred_unref(&nx->nx_defopt.nxo_cred);
			/* delete only the netopts for the given addresses */
			error = nfsrv_free_addrlist(nx, unxa);
	if (unxa->nxa_flags & NXA_ADD) {
		/*
		 * If going offline set the export time so that when
		 * coming back on line we will present a new write verifier
		 */
		if (unxa->nxa_flags & NXA_OFFLINE)
			microtime(&nx->nx_exptime);

		error = nfsrv_hang_addrlist(nx, unxa);
		vfs_setflags(mp, MNT_EXPORTED);

	if (nx && !nx->nx_expcnt) {
		/* export has no export options */
		LIST_REMOVE(nx, nx_next);
		LIST_REMOVE(nx, nx_hash);
		/* delete active user list for this export */
		nfsrv_free_user_list(&nx->nx_user_list);
		FREE(nx->nx_path, M_TEMP);
	if (LIST_EMPTY(&nxfs->nxfs_exports)) {
		/* exported file system has no more exports */
		LIST_REMOVE(nxfs, nxfs_next);
		FREE(nxfs->nxfs_path, M_TEMP);
		vfs_clearflags(mp, MNT_EXPORTED);

	lck_rw_done(&nfsrv_export_rwlock);
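
/*
 * Illustrative sketch (not part of the build): the export and exportfs
 * insertion loops in nfsrv_export() above keep their lists reverse-sorted
 * by path using the LIST_INSERT_BEFORE / LIST_INSERT_AFTER /
 * LIST_INSERT_HEAD pattern.  The user-space example below shows the same
 * technique with <sys/queue.h> and standard C only; the names "entry",
 * "elist", and "insert_reverse_sorted" are made up for this sketch.
 */
#if 0
#include <string.h>
#include <sys/queue.h>

struct entry {
	char path[64];
	LIST_ENTRY(entry) link;
};
LIST_HEAD(elist, entry);

/* Insert 'e' so that the list stays sorted in descending path order. */
static void
insert_reverse_sorted(struct elist *head, struct entry *e)
{
	struct entry *cur, *prev = NULL;

	LIST_FOREACH(cur, head, link) {
		if (strncmp(e->path, cur->path, sizeof(e->path)) > 0)
			break;		/* e sorts before cur */
		prev = cur;
	}
	if (cur)
		LIST_INSERT_BEFORE(cur, e, link);
	else if (prev)
		LIST_INSERT_AFTER(prev, e, link);	/* append after last entry */
	else
		LIST_INSERT_HEAD(head, e, link);	/* list was empty */
}
#endif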

static struct nfs_export_options *
nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam)
{
	struct nfs_export_options *nxo = NULL;
	struct nfs_netopt *no = NULL;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	/* Lookup in the export list first. */
	saddr = mbuf_data(nam);
	rnh = nx->nx_rtable[saddr->sa_family];
	no = (struct nfs_netopt *)
		(*rnh->rnh_matchaddr)((caddr_t)saddr, rnh);
	if (no && no->no_rnodes->rn_flags & RNF_ROOT)

	/* If no address match, use the default if it exists. */
	if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT))
		nxo = &nx->nx_defopt;
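
/*
 * Illustrative sketch (not part of the build): nfsrv_export_lookup() above
 * first tries to match the client's address and only falls back to the
 * default export options when no per-address entry matches.  The example
 * below shows the same "match the client, else use the default" shape with
 * a plain array keyed by IPv4 address; the real code uses a radix tree per
 * address family.  All names here are made up for this sketch.
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>

struct client_opt {
	struct in_addr addr;	/* client address */
	int options;		/* per-client export options */
};

static const struct client_opt *
lookup_client_opt(const struct client_opt *table, size_t count,
    const struct client_opt *defopt, struct in_addr addr)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (memcmp(&table[i].addr, &addr, sizeof(addr)) == 0)
			return (&table[i]);	/* per-client match */
	return (defopt);			/* may be NULL if no default */
}
#endif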

/* find an export for the given handle */
static struct nfs_export *
nfsrv_fhtoexport(struct nfs_filehandle *nfhp)
{
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export *nx;
	uint32_t fsid, expid;

	fsid = ntohl(nxh->nxh_fsid);
	expid = ntohl(nxh->nxh_expid);
	nx = NFSRVEXPHASH(fsid, expid)->lh_first;
	for (; nx; nx = LIST_NEXT(nx, nx_hash)) {
		if (nx->nx_fs->nxfs_id != fsid)
		if (nx->nx_id != expid)

/*
 * nfsrv_fhtovp() - convert FH to vnode and export info
 */
	struct nfs_filehandle *nfhp,
	struct nfsrv_descript *nd,
	struct nfs_export **nxp,
	struct nfs_export_options **nxop)
	struct nfs_exphandle *nxh = (struct nfs_exphandle *)nfhp->nfh_fhp;
	struct nfs_export_options *nxo;

	v = ntohl(nxh->nxh_version);
	if (v != NFS_FH_VERSION) {
		/* file handle format not supported */
	if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE)
	if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle))
	v = ntohs(nxh->nxh_flags);
	if (v & NXHF_INVALIDFH)

	*nxp = nfsrv_fhtoexport(nfhp);

	/* Get the export option structure for this <export, client> tuple. */
	*nxop = nxo = nfsrv_export_lookup(*nxp, nam);
	if (nam && (*nxop == NULL))

	/* Validate the security flavor of the request */
	for (i = 0, valid = 0; i < nxo->nxo_sec.count; i++) {
		if (nd->nd_sec == nxo->nxo_sec.flavors[i]) {
	/*
	 * RFC 2623 section 2.3.2 recommends no authentication
	 * requirement for certain NFS procedures used for mounting.
	 * This allows an unauthenticated superuser on the client
	 * to do mounts for the benefit of authenticated users.
	 */
	if (nd->nd_vers == NFS_VER2)
		if (nd->nd_procnum == NFSV2PROC_GETATTR ||
		    nd->nd_procnum == NFSV2PROC_STATFS)
	if (nd->nd_vers == NFS_VER3)
		if (nd->nd_procnum == NFSPROC_FSINFO)
	return (NFSERR_AUTHERR | AUTH_REJECTCRED);

	if (nxo && (nxo->nxo_flags & NX_OFFLINE))
		return ((nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER);

	/* find mount structure */
	mp = vfs_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path);
	/*
	 * We have an export, but no mount?
	 * Perhaps the export just hasn't been marked offline yet.
	 */
	return ((nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER);

	fidp = nfhp->nfh_fhp + sizeof(*nxh);
	error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL);

	/* vnode pointer should be good at this point or ... */

/*
 * nfsrv_credcheck() - check/map credentials according
 * to given export options.
 */
	struct nfsrv_descript *nd,
	__unused struct nfs_export *nx,
	struct nfs_export_options *nxo)
	if (nxo && nxo->nxo_cred) {
		if ((nxo->nxo_flags & NX_MAPALL) ||
		    ((nxo->nxo_flags & NX_MAPROOT) && !suser(nd->nd_cr, NULL))) {
			kauth_cred_ref(nxo->nxo_cred);
			kauth_cred_unref(&nd->nd_cr);
			nd->nd_cr = nxo->nxo_cred;
	ctx->vc_ucred = nd->nd_cr;

/*
 * nfsrv_vptofh() - convert vnode to file handle for given export
 *
 * If the caller is passing in a vnode for a ".." directory entry,
 * they can pass a directory NFS file handle (dnfhp) which will be
 * checked against the root export file handle.  If it matches, we
 * refuse to provide the file handle for the out-of-export directory.
 */
	struct nfs_export *nx,
	struct nfs_filehandle *dnfhp,
	struct nfs_filehandle *nfhp)
	uint32_t maxfidsize;

	nfhp->nfh_fhp = (u_char*)&nfhp->nfh_xh;
	nfhp->nfh_xh.nxh_version = htonl(NFS_FH_VERSION);
	nfhp->nfh_xh.nxh_fsid = htonl(nx->nx_fs->nxfs_id);
	nfhp->nfh_xh.nxh_expid = htonl(nx->nx_id);
	nfhp->nfh_xh.nxh_flags = 0;
	nfhp->nfh_xh.nxh_reserved = 0;

	if (nfsvers == NFS_VER2)
		bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE);

	/* if directory FH matches export root, return invalid FH */
	if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) {
		if (nfsvers == NFS_VER2)
			nfhp->nfh_len = NFSX_V2FH;
			nfhp->nfh_len = sizeof(nfhp->nfh_xh);
		nfhp->nfh_xh.nxh_fidlen = 0;
		nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH);

	if (nfsvers == NFS_VER2)
		maxfidsize = NFSV2_MAX_FID_SIZE;
		maxfidsize = NFSV3_MAX_FID_SIZE;
	nfhp->nfh_len = maxfidsize;

	error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx);
	if (nfhp->nfh_len > maxfidsize)
	nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len;
	nfhp->nfh_len += sizeof(nfhp->nfh_xh);
	if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH))
		nfhp->nfh_len = NFSX_V2FH;
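
/*
 * Illustrative sketch (not part of the build): nfsrv_vptofh() builds the
 * wire file handle as <export header><fid>, so the final length is the fid
 * length plus the header size, padded up to the fixed NFSv2 handle size for
 * v2 requests.  The constants below are stand-ins for this sketch, not the
 * real sizeof(struct nfs_exphandle) or NFSX_V2FH values.
 */
#if 0
#include <stdint.h>

#define EX_FH_HDR_SIZE	12u	/* stand-in for the export handle header size */
#define EX_V2_FH_SIZE	32u	/* stand-in for the fixed NFSv2 handle size */

static uint32_t
fh_wire_len(uint32_t fidlen, int is_v2)
{
	uint32_t len = fidlen + EX_FH_HDR_SIZE;

	/* NFSv2 file handles are fixed size, so short handles are padded */
	if (is_v2 && len < EX_V2_FH_SIZE)
		len = EX_V2_FH_SIZE;
	return (len);
}
#endif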

/*
 * Compare two file handles to see if they're the same.
 * Note that we don't use nfh_len because that may include
 * padding in an NFSv2 file handle.
 */
nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2)
{
	struct nfs_exphandle *nxh1, *nxh2;

	nxh1 = (struct nfs_exphandle *)fh1->nfh_fhp;
	nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp;
	len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen;
	len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen;
	if (bcmp(nxh1, nxh2, len1))

/*
 * Functions for dealing with active user lists
 */

/*
 * Compare address fields of two sockaddr_storage structures.
 * Returns zero if they match.
 */
nfsrv_cmp_sockaddr(struct sockaddr_storage *sock1, struct sockaddr_storage *sock2)
{
	struct sockaddr_in *ipv4_sock1, *ipv4_sock2;
	struct sockaddr_in6 *ipv6_sock1, *ipv6_sock2;

	/* check for valid parameters */
	if (sock1 == NULL || sock2 == NULL)

	/* check address length */
	if (sock1->ss_len != sock2->ss_len)

	/* Check address family */
	if (sock1->ss_family != sock2->ss_family)

	if (sock1->ss_family == AF_INET) {
		ipv4_sock1 = (struct sockaddr_in *)sock1;
		ipv4_sock2 = (struct sockaddr_in *)sock2;
		if (!bcmp(&ipv4_sock1->sin_addr, &ipv4_sock2->sin_addr, sizeof(struct in_addr)))
	ipv6_sock1 = (struct sockaddr_in6 *)sock1;
	ipv6_sock2 = (struct sockaddr_in6 *)sock2;
	if (!bcmp(&ipv6_sock1->sin6_addr, &ipv6_sock2->sin6_addr, sizeof(struct in6_addr)))
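
/*
 * Illustrative sketch (not part of the build): a user-space rendition of
 * the comparison above, using only standard BSD socket headers.  Like
 * nfsrv_cmp_sockaddr(), it compares the length, family, and address bytes
 * only, rather than the whole sockaddr_storage (whose trailing padding may
 * differ).  Returns zero on a match; the function name is made up for this
 * sketch.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int
cmp_sockaddr_storage(const struct sockaddr_storage *a,
    const struct sockaddr_storage *b)
{
	if (a == NULL || b == NULL)
		return (1);
	if ((a->ss_len != b->ss_len) || (a->ss_family != b->ss_family))
		return (1);
	if (a->ss_family == AF_INET) {
		const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
		const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;
		return (memcmp(&a4->sin_addr, &b4->sin_addr,
		    sizeof(struct in_addr)) != 0);
	}
	if (a->ss_family == AF_INET6) {
		const struct sockaddr_in6 *a6 = (const struct sockaddr_in6 *)a;
		const struct sockaddr_in6 *b6 = (const struct sockaddr_in6 *)b;
		return (memcmp(&a6->sin6_addr, &b6->sin6_addr,
		    sizeof(struct in6_addr)) != 0);
	}
	return (1);
}
#endif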

/*
 * Search the hash table for a user node with a matching IP address and uid field.
 * If found, the node's tm_last timestamp is updated and the node is returned.
 *
 * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned.
 * Returns NULL if a new node could not be allocated.
 *
 * The list's user_mutex lock MUST be held.
 */
static struct nfs_user_stat_node *
nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr_storage *sock, uid_t uid)
{
	struct nfs_user_stat_node *unode;
	struct nfs_user_stat_hashtbl_head *head;

	/* search the hash table */
	head = NFS_USER_STAT_HASH(list->user_hashtbl, uid);
	LIST_FOREACH(unode, head, hash_link) {
		if (uid == unode->uid && nfsrv_cmp_sockaddr(sock, &unode->sock) == 0) {
			/* found matching node */

	/* found node in the hash table, now update lru position */
	TAILQ_REMOVE(&list->user_lru, unode, lru_link);
	TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);

	/* update time stamp */
	unode->tm_last = (uint32_t)now.tv_sec;

	if (list->node_count < nfsrv_user_stat_max_nodes) {
		/* Allocate a new node */
		MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node),
		       M_TEMP, M_WAITOK | M_ZERO);

		/* increment node count */
		OSAddAtomic(1, (SInt32*)&nfsrv_user_stat_node_count);

		/* reuse the oldest node in the lru list */
		unode = TAILQ_FIRST(&list->user_lru);

		/* Remove the node */
		TAILQ_REMOVE(&list->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);

	/* Initialize the node */
	bcopy(sock, &unode->sock, sock->ss_len);
	unode->bytes_read = 0;
	unode->bytes_written = 0;
	unode->tm_start = (uint32_t)now.tv_sec;
	unode->tm_last = (uint32_t)now.tv_sec;

	/* insert the node */
	TAILQ_INSERT_TAIL(&list->user_lru, unode, lru_link);
	LIST_INSERT_HEAD(head, unode, hash_link);

nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes)
{
	struct nfs_user_stat_node *unode;
	struct nfs_active_user_list *ulist;
	struct sockaddr_storage *sock_stor;

	if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam))

	sock_stor = (struct sockaddr_storage *)mbuf_data(nd->nd_nam);

	/* check address family before going any further */
	if ((sock_stor->ss_family != AF_INET) && (sock_stor->ss_family != AF_INET6))

	ulist = &nx->nx_user_list;

	/* lock the active user list */
	lck_mtx_lock(&ulist->user_mutex);

	/* get the user node */
	unode = nfsrv_get_user_stat_node(ulist, sock_stor, uid);

	lck_mtx_unlock(&ulist->user_mutex);

	/* update counters */
	unode->bytes_read += rd_bytes;
	unode->bytes_written += wr_bytes;

	lck_mtx_unlock(&ulist->user_mutex);

/* initialize an active user list */
nfsrv_init_user_list(struct nfs_active_user_list *ulist)
{
	/* initialize the lru */
	TAILQ_INIT(&ulist->user_lru);

	/* initialize the hash table */
	for(i = 0; i < NFS_USER_STAT_HASH_SIZE; i++)
		LIST_INIT(&ulist->user_hashtbl[i]);
	ulist->node_count = 0;

	lck_mtx_init(&ulist->user_mutex, nfsrv_active_user_mutex_group, LCK_ATTR_NULL);

/* Free all nodes in an active user list */
nfsrv_free_user_list(struct nfs_active_user_list *ulist)
{
	struct nfs_user_stat_node *unode;

	while ((unode = TAILQ_FIRST(&ulist->user_lru))) {
		/* Remove node and free */
		TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);

		/* decrement node count */
		OSAddAtomic(-1, (SInt32*)&nfsrv_user_stat_node_count);
	ulist->node_count = 0;

	lck_mtx_destroy(&ulist->user_mutex, nfsrv_active_user_mutex_group);

/* Reclaim old expired user nodes from active user lists. */
nfsrv_active_user_list_reclaim(void)
{
	struct nfs_exportfs *nxfs;
	struct nfs_export *nx;
	struct nfs_active_user_list *ulist;
	struct nfs_user_stat_hashtbl_head oldlist;
	struct nfs_user_stat_node *unode, *unode_next;

	LIST_INIT(&oldlist);

	lck_rw_lock_shared(&nfsrv_export_rwlock);
	tstale = now.tv_sec - nfsrv_user_stat_max_idle_sec;
	LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) {
		LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) {
			/* Scan through all user nodes of this export */
			ulist = &nx->nx_user_list;
			lck_mtx_lock(&ulist->user_mutex);
			for (unode = TAILQ_FIRST(&ulist->user_lru); unode; unode = unode_next) {
				unode_next = TAILQ_NEXT(unode, lru_link);

				/* check if this node has expired */
				if (unode->tm_last >= tstale)

				/* Remove node from the active user list */
				TAILQ_REMOVE(&ulist->user_lru, unode, lru_link);
				LIST_REMOVE(unode, hash_link);

				/* Add node to temp list */
				LIST_INSERT_HEAD(&oldlist, unode, hash_link);

				/* decrement node count */
				OSAddAtomic(-1, (SInt32*)&nfsrv_user_stat_node_count);
				ulist->node_count--;

			/* can unlock this export's list now */
			lck_mtx_unlock(&ulist->user_mutex);
	lck_rw_done(&nfsrv_export_rwlock);

	/* Free expired nodes */
	while ((unode = LIST_FIRST(&oldlist))) {
		LIST_REMOVE(unode, hash_link);
		FREE(unode, M_TEMP);

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 */
static u_char nfsrv_v2errmap[] = {
	NFSERR_PERM,	NFSERR_NOENT,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NXIO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_ACCES,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_EXIST,	NFSERR_IO,	NFSERR_NODEV,	NFSERR_NOTDIR,
	NFSERR_ISDIR,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_FBIG,	NFSERR_NOSPC,	NFSERR_IO,	NFSERR_ROFS,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_NAMETOL,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO,	NFSERR_IO,	NFSERR_DQUOT,	NFSERR_STALE,
};

/*
 * Maps errno values to nfs error numbers.
 * Although it is not obvious whether or not NFS clients really care if
 * a returned error value is in the specified list for the procedure, the
 * safest thing to do is filter them appropriately. For Version 2, the
 * X/Open XNFS document is the only specification that defines error values
 * for each RPC (The RFC simply lists all possible error values for all RPCs),
 * so I have decided to not do this for Version 2.
 * The first entry is the default error return and the rest are the valid
 * errors for that RPC in increasing numeric order.
 */
static short nfsv3err_null[] = {
static short nfsv3err_getattr[] = {
static short nfsv3err_setattr[] = {
static short nfsv3err_lookup[] = {
static short nfsv3err_access[] = {
static short nfsv3err_readlink[] = {
static short nfsv3err_read[] = {
static short nfsv3err_write[] = {
static short nfsv3err_create[] = {
static short nfsv3err_mkdir[] = {
static short nfsv3err_symlink[] = {
static short nfsv3err_mknod[] = {
static short nfsv3err_remove[] = {
static short nfsv3err_rmdir[] = {
static short nfsv3err_rename[] = {
static short nfsv3err_link[] = {
static short nfsv3err_readdir[] = {
static short nfsv3err_readdirplus[] = {
static short nfsv3err_fsstat[] = {
static short nfsv3err_fsinfo[] = {
static short nfsv3err_pathconf[] = {
static short nfsv3err_commit[] = {

static short *nfsrv_v3errmap[] = {
	nfsv3err_readdirplus,

/*
 * Map errnos to NFS error numbers. For Version 3 also filter out error
 * numbers not specified for the associated procedure.
 */
nfsrv_errmap(struct nfsrv_descript *nd, int err)
{
	short *defaulterrp, *errp;

	if (nd->nd_vers == NFS_VER2) {
		if (err <= (int)sizeof(nfsrv_v2errmap))
			return ((int)nfsrv_v2errmap[err - 1]);
	if (nd->nd_procnum > NFSPROC_COMMIT)
		return (err & 0xffff);
	errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum];
	else if (*errp > err)
	return ((int)*defaulterrp);

#endif /* NFSSERVER */