2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
68 #include <nfs/nfs_conf.h>
72 * Socket operations for use by nfs
75 #include <sys/param.h>
76 #include <sys/systm.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
93 #include <sys/reboot.h>
95 #include <kern/clock.h>
96 #include <kern/task.h>
97 #include <kern/thread.h>
98 #include <kern/thread_call.h>
100 #include <sys/acct.h>
102 #include <netinet/in.h>
103 #include <netinet/tcp.h>
105 #include <nfs/rpcv2.h>
106 #include <nfs/krpc.h>
107 #include <nfs/nfsproto.h>
109 #include <nfs/xdr_subs.h>
110 #include <nfs/nfsm_subs.h>
111 #include <nfs/nfs_gss.h>
112 #include <nfs/nfsmount.h>
113 #include <nfs/nfsnode.h>
115 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
116 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
119 #define SUN_LEN(su) \
120 (sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
124 boolean_t
current_thread_aborted(void);
125 kern_return_t
thread_terminate(thread_t
);
127 ZONE_DECLARE(nfs_fhandle_zone
, "fhandle", sizeof(struct fhandle
), ZC_NONE
);
128 ZONE_DECLARE(nfs_req_zone
, "NFS req", sizeof(struct nfsreq
), ZC_NONE
);
129 ZONE_DECLARE(nfsrv_descript_zone
, "NFSV3 srvdesc",
130 sizeof(struct nfsrv_descript
), ZC_NONE
);
133 #if CONFIG_NFS_SERVER
134 int nfsrv_sock_max_rec_queue_length
= 128; /* max # RPC records queued on (UDP) socket */
136 int nfsrv_getstream(struct nfsrv_sock
*, int);
137 int nfsrv_getreq(struct nfsrv_descript
*);
138 extern int nfsv3_procid
[NFS_NPROCS
];
139 #endif /* CONFIG_NFS_SERVER */
142 * compare two sockaddr structures
145 nfs_sockaddr_cmp(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
153 if (sa1
->sa_family
!= sa2
->sa_family
) {
154 return (sa1
->sa_family
< sa2
->sa_family
) ? -1 : 1;
156 if (sa1
->sa_len
!= sa2
->sa_len
) {
157 return (sa1
->sa_len
< sa2
->sa_len
) ? -1 : 1;
159 if (sa1
->sa_family
== AF_INET
) {
160 return bcmp(&((struct sockaddr_in
*)sa1
)->sin_addr
,
161 &((struct sockaddr_in
*)sa2
)->sin_addr
, sizeof(((struct sockaddr_in
*)sa1
)->sin_addr
));
163 if (sa1
->sa_family
== AF_INET6
) {
164 return bcmp(&((struct sockaddr_in6
*)sa1
)->sin6_addr
,
165 &((struct sockaddr_in6
*)sa2
)->sin6_addr
, sizeof(((struct sockaddr_in6
*)sa1
)->sin6_addr
));
170 #if CONFIG_NFS_CLIENT
172 int nfs_connect_search_new_socket(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
173 int nfs_connect_search_socket_connect(struct nfsmount
*, struct nfs_socket
*, int);
174 int nfs_connect_search_ping(struct nfsmount
*, struct nfs_socket
*, struct timeval
*);
175 void nfs_connect_search_socket_found(struct nfsmount
*, struct nfs_socket_search
*, struct nfs_socket
*);
176 void nfs_connect_search_socket_reap(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
177 int nfs_connect_search_check(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
178 int nfs_reconnect(struct nfsmount
*);
179 int nfs_connect_setup(struct nfsmount
*);
180 void nfs_mount_sock_thread(void *, wait_result_t
);
181 void nfs_udp_rcv(socket_t
, void*, int);
182 void nfs_tcp_rcv(socket_t
, void*, int);
183 void nfs_sock_poke(struct nfsmount
*);
184 void nfs_request_match_reply(struct nfsmount
*, mbuf_t
);
185 void nfs_reqdequeue(struct nfsreq
*);
186 void nfs_reqbusy(struct nfsreq
*);
187 struct nfsreq
*nfs_reqnext(struct nfsreq
*);
188 int nfs_wait_reply(struct nfsreq
*);
189 void nfs_softterm(struct nfsreq
*);
190 int nfs_can_squish(struct nfsmount
*);
191 int nfs_is_squishy(struct nfsmount
*);
192 int nfs_is_dead(int, struct nfsmount
*);
195 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
196 * Use the mean and mean deviation of rtt for the appropriate type of rpc
197 * for the frequent rpcs and a default for the others.
198 * The justification for doing "other" this way is that these rpcs
199 * happen so infrequently that timer est. would probably be stale.
200 * Also, since many of these rpcs are
201 * non-idempotent, a conservative timeout is desired.
202 * getattr, lookup - A+2D
206 #define NFS_RTO(n, t) \
207 ((t) == 0 ? (n)->nm_timeo : \
209 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
210 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
211 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
212 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
215 * Defines which timer to use for the procnum.
222 static const int proct
[] = {
224 [NFSPROC_GETATTR
] = 1,
225 [NFSPROC_SETATTR
] = 0,
226 [NFSPROC_LOOKUP
] = 2,
227 [NFSPROC_ACCESS
] = 1,
228 [NFSPROC_READLINK
] = 3,
231 [NFSPROC_CREATE
] = 0,
233 [NFSPROC_SYMLINK
] = 0,
235 [NFSPROC_REMOVE
] = 0,
237 [NFSPROC_RENAME
] = 0,
239 [NFSPROC_READDIR
] = 3,
240 [NFSPROC_READDIRPLUS
] = 3,
241 [NFSPROC_FSSTAT
] = 0,
242 [NFSPROC_FSINFO
] = 0,
243 [NFSPROC_PATHCONF
] = 0,
244 [NFSPROC_COMMIT
] = 0,
249 * There is a congestion window for outstanding rpcs maintained per mount
250 * point. The cwnd size is adjusted in roughly the way that:
251 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
252 * SIGCOMM '88". ACM, August 1988.
253 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
254 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
255 * of rpcs is in progress.
256 * (The sent count and cwnd are scaled for integer arith.)
257 * Variants of "slow start" were tried and were found to be too much of a
258 * performance hit (ave. rtt 3 times larger),
259 * I suspect due to the large rtt that nfs rpcs have.
261 #define NFS_CWNDSCALE 256
262 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
/* Backoff multiplier table (powers of two) — presumably indexed by retransmit
 * count to scale the RPC timeout; TODO confirm against the timer code, which
 * is not visible in this chunk. */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
266 * Increment location index to next address/server/location.
269 nfs_location_next(struct nfs_fs_locations
*nlp
, struct nfs_location_index
*nlip
)
271 uint8_t loc
= nlip
->nli_loc
;
272 uint8_t serv
= nlip
->nli_serv
;
273 uint8_t addr
= nlip
->nli_addr
;
275 /* move to next address */
277 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
278 /* no more addresses on current server, go to first address of next server */
282 if (serv
>= nlp
->nl_locations
[loc
]->nl_servcount
) {
283 /* no more servers on current location, go to first server of next location */
286 if (loc
>= nlp
->nl_numlocs
) {
287 loc
= 0; /* after last location, wrap back around to first location */
292 * It's possible for this next server to not have any addresses.
293 * Check for that here and go to the next server.
294 * But bail out if we've managed to come back around to the original
295 * location that was passed in. (That would mean no servers had any
296 * addresses. And we don't want to spin here forever.)
298 if ((loc
== nlip
->nli_loc
) && (serv
== nlip
->nli_serv
) && (addr
== nlip
->nli_addr
)) {
301 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
306 nlip
->nli_serv
= serv
;
307 nlip
->nli_addr
= addr
;
311 * Compare two location indices.
314 nfs_location_index_cmp(struct nfs_location_index
*nlip1
, struct nfs_location_index
*nlip2
)
316 if (nlip1
->nli_loc
!= nlip2
->nli_loc
) {
317 return nlip1
->nli_loc
- nlip2
->nli_loc
;
319 if (nlip1
->nli_serv
!= nlip2
->nli_serv
) {
320 return nlip1
->nli_serv
- nlip2
->nli_serv
;
322 return nlip1
->nli_addr
- nlip2
->nli_addr
;
326 * Get the mntfromname (or path portion only) for a given location.
329 nfs_location_mntfromname(struct nfs_fs_locations
*locs
, struct nfs_location_index idx
, char *s
, size_t size
, int pathonly
)
331 struct nfs_fs_location
*fsl
= locs
->nl_locations
[idx
.nli_loc
];
337 char *name
= fsl
->nl_servers
[idx
.nli_serv
]->ns_name
;
342 if (*fsl
->nl_servers
[idx
.nli_serv
]->ns_addresses
[idx
.nli_addr
]) {
343 name
= fsl
->nl_servers
[idx
.nli_serv
]->ns_addresses
[idx
.nli_addr
];
345 cnt
= scnprintf(p
, size
, "<%s>:", name
);
347 cnt
= scnprintf(p
, size
, "%s:", name
);
352 if (fsl
->nl_path
.np_compcount
== 0) {
353 /* mounting root export on server */
360 /* append each server path component */
361 for (i
= 0; (size
> 0) && (i
< (int)fsl
->nl_path
.np_compcount
); i
++) {
362 cnt
= scnprintf(p
, size
, "/%s", fsl
->nl_path
.np_components
[i
]);
369 * NFS client connect socket upcall.
370 * (Used only during socket connect/search.)
373 nfs_connect_upcall(socket_t so
, void *arg
, __unused
int waitflag
)
375 struct nfs_socket
*nso
= arg
;
378 int error
= 0, recv
= 1;
380 if (nso
->nso_flags
& NSO_CONNECTING
) {
381 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso
, nso
->nso_flags
);
382 wakeup(nso
->nso_wake
);
386 lck_mtx_lock(&nso
->nso_lock
);
387 if ((nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) || !(nso
->nso_flags
& NSO_PINGING
)) {
388 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso
);
389 lck_mtx_unlock(&nso
->nso_lock
);
392 NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso
, nso
->nso_flags
);
393 nso
->nso_flags
|= NSO_UPCALL
;
395 /* loop while we make error-free progress */
396 while (!error
&& recv
) {
397 /* make sure we're still interested in this socket */
398 if (nso
->nso_flags
& (NSO_DISCONNECTING
| NSO_DEAD
)) {
401 lck_mtx_unlock(&nso
->nso_lock
);
403 if (nso
->nso_sotype
== SOCK_STREAM
) {
404 error
= nfs_rpc_record_read(so
, &nso
->nso_rrs
, MSG_DONTWAIT
, &recv
, &m
);
405 NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error
, recv
);
408 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
411 lck_mtx_lock(&nso
->nso_lock
);
413 /* match response with request */
414 struct nfsm_chain nmrep
;
415 uint32_t reply
= 0, rxid
= 0, verf_type
, verf_len
;
416 uint32_t reply_status
, rejected_status
, accepted_status
;
418 NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m
);
419 nfsm_chain_dissect_init(error
, &nmrep
, m
);
420 nfsm_chain_get_32(error
, &nmrep
, rxid
);
421 nfsm_chain_get_32(error
, &nmrep
, reply
);
422 if (!error
&& ((reply
!= RPC_REPLY
) || (rxid
!= nso
->nso_pingxid
))) {
425 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
426 if (!error
&& (reply_status
== RPC_MSGDENIED
)) {
427 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
429 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
432 nfsm_chain_get_32(error
, &nmrep
, verf_type
); /* verifier flavor */
433 nfsm_chain_get_32(error
, &nmrep
, verf_len
); /* verifier length */
436 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
438 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
440 NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status
, nso
->nso_version
);
441 if ((accepted_status
== RPC_PROGMISMATCH
) && !nso
->nso_version
) {
442 uint32_t minvers
, maxvers
;
443 nfsm_chain_get_32(error
, &nmrep
, minvers
);
444 nfsm_chain_get_32(error
, &nmrep
, maxvers
);
446 if (nso
->nso_protocol
== PMAPPROG
) {
447 if ((minvers
> RPCBVERS4
) || (maxvers
< PMAPVERS
)) {
448 error
= EPROGMISMATCH
;
449 } else if ((nso
->nso_saddr
->sa_family
== AF_INET
) &&
450 (PMAPVERS
>= minvers
) && (PMAPVERS
<= maxvers
)) {
451 nso
->nso_version
= PMAPVERS
;
452 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
453 if ((RPCBVERS4
>= minvers
) && (RPCBVERS4
<= maxvers
)) {
454 nso
->nso_version
= RPCBVERS4
;
455 } else if ((RPCBVERS3
>= minvers
) && (RPCBVERS3
<= maxvers
)) {
456 nso
->nso_version
= RPCBVERS3
;
459 } else if (nso
->nso_protocol
== NFS_PROG
) {
463 * N.B. Both portmapper and rpcbind V3 are happy to return
464 * addresses for other versions than the one you ask (getport or
465 * getaddr) and thus we may have fallen to this code path. So if
466 * we get a version that we support, use highest supported
467 * version. This assumes that the server supports all versions
468 * between minvers and maxvers. Note for IPv6 we will try and
469 * use rpcbind V4 which has getversaddr and we should not get
470 * here if that was successful.
472 for (vers
= nso
->nso_nfs_max_vers
; vers
>= (int)nso
->nso_nfs_min_vers
; vers
--) {
473 if (vers
>= (int)minvers
&& vers
<= (int)maxvers
) {
477 nso
->nso_version
= (vers
< (int)nso
->nso_nfs_min_vers
) ? 0 : vers
;
479 if (!error
&& nso
->nso_version
) {
480 accepted_status
= RPC_SUCCESS
;
484 switch (accepted_status
) {
488 case RPC_PROGUNAVAIL
:
489 error
= EPROGUNAVAIL
;
491 case RPC_PROGMISMATCH
:
492 error
= EPROGMISMATCH
;
494 case RPC_PROCUNAVAIL
:
495 error
= EPROCUNAVAIL
;
507 nso
->nso_flags
&= ~NSO_PINGING
;
509 NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
510 nso
->nso_protocol
, nso
->nso_version
, error
);
511 nso
->nso_error
= error
;
512 nso
->nso_flags
|= NSO_DEAD
;
514 nso
->nso_flags
|= NSO_VERIFIED
;
517 /* wake up search thread */
518 wakeup(nso
->nso_wake
);
523 nso
->nso_flags
&= ~NSO_UPCALL
;
524 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
525 /* problems with the socket... */
526 NFS_SOCK_DBG("connect upcall failed %d\n", error
);
527 nso
->nso_error
= error
? error
: EPIPE
;
528 nso
->nso_flags
|= NSO_DEAD
;
529 wakeup(nso
->nso_wake
);
531 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
532 wakeup(&nso
->nso_flags
);
534 lck_mtx_unlock(&nso
->nso_lock
);
538 * Create/initialize an nfs_socket structure.
542 struct nfsmount
*nmp
,
549 struct nfs_socket
**nsop
)
551 struct nfs_socket
*nso
;
554 #define NFS_SOCKET_DEBUGGING
555 #ifdef NFS_SOCKET_DEBUGGING
556 char naddr
[sizeof((struct sockaddr_un
*)0)->sun_path
];
559 switch (sa
->sa_family
) {
561 if (sa
->sa_len
!= sizeof(struct sockaddr_in
)) {
564 sinaddr
= &((struct sockaddr_in
*)sa
)->sin_addr
;
565 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
) {
566 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
570 if (sa
->sa_len
!= sizeof(struct sockaddr_in6
)) {
573 sinaddr
= &((struct sockaddr_in6
*)sa
)->sin6_addr
;
574 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
) {
575 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
579 if (sa
->sa_len
!= sizeof(struct sockaddr_un
) && sa
->sa_len
!= SUN_LEN((struct sockaddr_un
*)sa
)) {
582 strlcpy(naddr
, ((struct sockaddr_un
*)sa
)->sun_path
, sizeof(naddr
));
585 strlcpy(naddr
, "<unsupported address family>", sizeof(naddr
));
589 char naddr
[1] = { 0 };
594 /* Create the socket. */
595 MALLOC(nso
, struct nfs_socket
*, sizeof(struct nfs_socket
), M_TEMP
, M_WAITOK
| M_ZERO
);
597 MALLOC(nso
->nso_saddr
, struct sockaddr
*, sa
->sa_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
599 if (!nso
|| !nso
->nso_saddr
) {
605 lck_mtx_init(&nso
->nso_lock
, &nfs_request_grp
, LCK_ATTR_NULL
);
606 nso
->nso_sotype
= sotype
;
607 if (nso
->nso_sotype
== SOCK_STREAM
) {
608 nfs_rpc_record_state_init(&nso
->nso_rrs
);
611 nso
->nso_timestamp
= now
.tv_sec
;
612 bcopy(sa
, nso
->nso_saddr
, sa
->sa_len
);
613 switch (sa
->sa_family
) {
616 if (sa
->sa_family
== AF_INET
) {
617 ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
618 } else if (sa
->sa_family
== AF_INET6
) {
619 ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
625 nso
->nso_protocol
= protocol
;
626 nso
->nso_version
= vers
;
627 nso
->nso_nfs_min_vers
= PVER2MAJOR(nmp
->nm_min_vers
);
628 nso
->nso_nfs_max_vers
= PVER2MAJOR(nmp
->nm_max_vers
);
630 error
= sock_socket(sa
->sa_family
, nso
->nso_sotype
, 0, NULL
, NULL
, &nso
->nso_so
);
632 /* Some servers require that the client port be a reserved port number. */
633 if (!error
&& resvport
&& ((sa
->sa_family
== AF_INET
) || (sa
->sa_family
== AF_INET6
))) {
634 struct sockaddr_storage ss
;
635 int level
= (sa
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
636 int optname
= (sa
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
637 int portrange
= IP_PORTRANGE_LOW
;
639 error
= sock_setsockopt(nso
->nso_so
, level
, optname
, &portrange
, sizeof(portrange
));
640 if (!error
) { /* bind now to check for failure */
641 ss
.ss_len
= sa
->sa_len
;
642 ss
.ss_family
= sa
->sa_family
;
643 if (ss
.ss_family
== AF_INET
) {
644 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
645 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
646 } else if (ss
.ss_family
== AF_INET6
) {
647 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
648 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
653 error
= sock_bind(nso
->nso_so
, (struct sockaddr
*)&ss
);
659 NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
660 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nso
, naddr
, sotype
,
661 resvport
? "r" : "", port
, protocol
, vers
);
662 nfs_socket_destroy(nso
);
664 NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
665 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, naddr
,
666 sotype
, resvport
? "r" : "", port
, protocol
, vers
);
673 * Destroy an nfs_socket structure.
676 nfs_socket_destroy(struct nfs_socket
*nso
)
678 struct timespec ts
= { .tv_sec
= 4, .tv_nsec
= 0 };
680 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso
, nso
->nso_flags
, nso
->nso_error
);
681 lck_mtx_lock(&nso
->nso_lock
);
682 nso
->nso_flags
|= NSO_DISCONNECTING
;
683 if (nso
->nso_flags
& NSO_UPCALL
) { /* give upcall a chance to complete */
684 msleep(&nso
->nso_flags
, &nso
->nso_lock
, PZERO
- 1, "nfswaitupcall", &ts
);
686 lck_mtx_unlock(&nso
->nso_lock
);
687 sock_shutdown(nso
->nso_so
, SHUT_RDWR
);
688 sock_close(nso
->nso_so
);
689 if (nso
->nso_sotype
== SOCK_STREAM
) {
690 nfs_rpc_record_state_cleanup(&nso
->nso_rrs
);
692 lck_mtx_destroy(&nso
->nso_lock
, &nfs_request_grp
);
693 if (nso
->nso_saddr
) {
694 FREE(nso
->nso_saddr
, M_SONAME
);
696 if (nso
->nso_saddr2
) {
697 FREE(nso
->nso_saddr2
, M_SONAME
);
699 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso
);
704 * Set common socket options on an nfs_socket.
707 nfs_socket_options(struct nfsmount
*nmp
, struct nfs_socket
*nso
)
710 * Set socket send/receive timeouts
711 * - Receive timeout shouldn't matter because most receives are performed
712 * in the socket upcall non-blocking.
713 * - Send timeout should allow us to react to a blocked socket.
714 * Soft mounts will want to abort sooner.
716 struct timeval timeo
;
717 int on
= 1, proto
, reserve
, error
;
720 timeo
.tv_sec
= (NMFLAG(nmp
, SOFT
) || nfs_can_squish(nmp
)) ? 5 : 60;
721 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
722 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
723 if (nso
->nso_sotype
== SOCK_STREAM
) {
724 /* Assume that SOCK_STREAM always requires a connection */
725 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_KEEPALIVE
, &on
, sizeof(on
));
726 /* set nodelay for TCP */
727 sock_gettype(nso
->nso_so
, NULL
, NULL
, &proto
);
728 if (proto
== IPPROTO_TCP
) {
729 sock_setsockopt(nso
->nso_so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
733 /* set socket buffer sizes for UDP/TCP */
734 reserve
= (nso
->nso_sotype
== SOCK_DGRAM
) ? NFS_UDPSOCKBUF
: MAX(nfs_tcp_sockbuf
, nmp
->nm_wsize
* 2);
736 error
= sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDBUF
, &reserve
, sizeof(reserve
));
740 log(LOG_INFO
, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error
, reserve
);
743 reserve
= (nso
->nso_sotype
== SOCK_DGRAM
) ? NFS_UDPSOCKBUF
: MAX(nfs_tcp_sockbuf
, nmp
->nm_rsize
* 2);
744 error
= sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVBUF
, &reserve
, sizeof(reserve
));
746 log(LOG_INFO
, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error
, reserve
);
749 /* set SO_NOADDRERR to detect network changes ASAP */
750 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
751 /* just playin' it safe with upcalls */
752 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
753 /* socket should be interruptible if the mount is */
754 if (!NMFLAG(nmp
, INTR
)) {
755 sock_nointerrupt(nso
->nso_so
, 1);
760 * Release resources held in an nfs_socket_search.
763 nfs_socket_search_cleanup(struct nfs_socket_search
*nss
)
765 struct nfs_socket
*nso
, *nsonext
;
767 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
768 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
770 nfs_socket_destroy(nso
);
773 nfs_socket_destroy(nss
->nss_sock
);
774 nss
->nss_sock
= NULL
;
779 * Prefer returning certain errors over others.
780 * This function returns a ranking of the given error.
783 nfs_connect_error_class(int error
)
818 * Make sure a socket search returns the best error.
821 nfs_socket_search_update_error(struct nfs_socket_search
*nss
, int error
)
823 if (nfs_connect_error_class(error
) >= nfs_connect_error_class(nss
->nss_error
)) {
824 nss
->nss_error
= error
;
828 /* nfs_connect_search_new_socket:
829 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
832 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
833 * could not be used or if a socket timed out.
836 nfs_connect_search_new_socket(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
838 struct nfs_fs_location
*fsl
;
839 struct nfs_fs_server
*fss
;
840 struct sockaddr_storage ss
;
841 struct nfs_socket
*nso
;
846 NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
847 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
->nss_addrcnt
);
850 * while there are addresses and:
851 * we have no sockets or
852 * the last address failed and did not produce a socket (nss_last < 0) or
853 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
854 * then attempt to create a socket with the current address.
856 while (nss
->nss_addrcnt
> 0 && ((nss
->nss_last
< 0) || (nss
->nss_sockcnt
== 0) ||
857 ((nss
->nss_sockcnt
< 4) && (now
->tv_sec
>= (nss
->nss_last
+ 2))))) {
858 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
861 /* Can we convert the address to a sockaddr? */
862 fsl
= nmp
->nm_locations
.nl_locations
[nss
->nss_nextloc
.nli_loc
];
863 fss
= fsl
->nl_servers
[nss
->nss_nextloc
.nli_serv
];
864 addrstr
= fss
->ns_addresses
[nss
->nss_nextloc
.nli_addr
];
865 NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr
, nss
->nss_protocol
, nss
->nss_port
);
866 if (*addrstr
== '\0') {
868 * We have an unspecified local domain address. We use the program to translate to
869 * a well known local transport address. We only support PMAPROG and NFS for this.
871 if (nss
->nss_protocol
== PMAPPROG
) {
872 addrstr
= (nss
->nss_sotype
== SOCK_DGRAM
) ? RPCB_TICLTS_PATH
: RPCB_TICOTSORD_PATH
;
873 } else if (nss
->nss_protocol
== NFS_PROG
) {
874 addrstr
= nmp
->nm_nfs_localport
;
875 if (!addrstr
|| *addrstr
== '\0') {
876 addrstr
= (nss
->nss_sotype
== SOCK_DGRAM
) ? NFS_TICLTS_PATH
: NFS_TICOTSORD_PATH
;
879 NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss
->nss_protocol
, addrstr
);
881 if (!nfs_uaddr2sockaddr(addrstr
, (struct sockaddr
*)&ss
)) {
882 NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr
);
883 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
884 nss
->nss_addrcnt
-= 1;
888 /* Check that socket family is acceptable. */
889 if (nmp
->nm_sofamily
&& (ss
.ss_family
!= nmp
->nm_sofamily
)) {
890 NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss
.ss_family
, nmp
->nm_sofamily
);
891 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
892 nss
->nss_addrcnt
-= 1;
897 /* Create the socket. */
898 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nss
->nss_sotype
,
899 nss
->nss_port
, nss
->nss_protocol
, nss
->nss_version
,
900 ((nss
->nss_protocol
== NFS_PROG
) && NMFLAG(nmp
, RESVPORT
)), &nso
);
905 nso
->nso_location
= nss
->nss_nextloc
;
907 error
= sock_setupcall(nso
->nso_so
, nfs_connect_upcall
, nso
);
909 NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso
, error
);
910 lck_mtx_lock(&nso
->nso_lock
);
911 nso
->nso_error
= error
;
912 nso
->nso_flags
|= NSO_DEAD
;
913 lck_mtx_unlock(&nso
->nso_lock
);
916 TAILQ_INSERT_TAIL(&nss
->nss_socklist
, nso
, nso_link
);
918 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
919 nss
->nss_addrcnt
-= 1;
921 nss
->nss_last
= now
->tv_sec
;
924 if (nss
->nss_addrcnt
== 0 && nss
->nss_last
< 0) {
925 nss
->nss_last
= now
->tv_sec
;
932 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
933 * If successful set the socket options for the socket as require from the mount.
935 * Assumes: nso->nso_lock is held on entry and return.
938 nfs_connect_search_socket_connect(struct nfsmount
*nmp
, struct nfs_socket
*nso
, int verbose
)
942 if ((nso
->nso_sotype
!= SOCK_STREAM
) && NMFLAG(nmp
, NOCONNECT
)) {
943 /* no connection needed, just say it's already connected */
944 NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
945 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
946 nso
->nso_flags
|= NSO_CONNECTED
;
947 nfs_socket_options(nmp
, nso
);
948 return 1; /* Socket is connected and setup */
949 } else if (!(nso
->nso_flags
& NSO_CONNECTING
)) {
950 /* initiate the connection */
951 nso
->nso_flags
|= NSO_CONNECTING
;
952 lck_mtx_unlock(&nso
->nso_lock
);
953 NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
954 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
,
955 nso
->nso_saddr
->sa_family
== AF_LOCAL
? ((struct sockaddr_un
*)nso
->nso_saddr
)->sun_path
: "");
956 error
= sock_connect(nso
->nso_so
, nso
->nso_saddr
, MSG_DONTWAIT
);
958 NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
959 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
961 lck_mtx_lock(&nso
->nso_lock
);
962 if (error
&& (error
!= EINPROGRESS
)) {
963 nso
->nso_error
= error
;
964 nso
->nso_flags
|= NSO_DEAD
;
968 if (nso
->nso_flags
& NSO_CONNECTING
) {
969 /* check the connection */
970 if (sock_isconnected(nso
->nso_so
)) {
971 NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
972 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
973 nso
->nso_flags
&= ~NSO_CONNECTING
;
974 nso
->nso_flags
|= NSO_CONNECTED
;
975 nfs_socket_options(nmp
, nso
);
976 return 1; /* Socket is connected and setup */
978 int optlen
= sizeof(error
);
980 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &error
, &optlen
);
981 if (error
) { /* we got an error on the socket */
982 NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
983 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
985 printf("nfs connect socket error %d for %s\n",
986 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
988 nso
->nso_error
= error
;
989 nso
->nso_flags
|= NSO_DEAD
;
995 return 0; /* Waiting to be connected */
999 * nfs_connect_search_ping: Send a null proc on the nso socket.
/* Called with the nso lock held. nmp: mount being connected; nso: candidate socket; now: current time, recorded for UDP retransmit pacing. */
1002 nfs_connect_search_ping(struct nfsmount
*nmp
, struct nfs_socket
*nso
, struct timeval
*now
)
1004 /* initiate a NULL RPC request */
/* Start from the socket's current ping transaction id. */
1005 uint64_t xid
= nso
->nso_pingxid
;
1006 mbuf_t m
, mreq
= NULL
;
1008 size_t reqlen
, sentlen
;
1009 uint32_t vers
= nso
->nso_version
;
/* Choose the RPC version to ping with, based on the target program. */
1013 if (nso
->nso_protocol
== PMAPPROG
) {
1014 vers
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
1015 } else if (nso
->nso_protocol
== NFS_PROG
) {
1016 vers
= PVER2MAJOR(nmp
->nm_max_vers
);
/* Drop the socket lock while building the NULL RPC request header. */
1019 lck_mtx_unlock(&nso
->nso_lock
);
1020 NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso
, nso
->nso_sotype
, nso
->nso_protocol
, vers
);
1021 error
= nfsm_rpchead2(nmp
, nso
->nso_sotype
, nso
->nso_protocol
, vers
, 0, RPCAUTH_SYS
,
1022 vfs_context_ucred(vfs_context_kernel()), NULL
, NULL
, &xid
, &mreq
);
1023 lck_mtx_lock(&nso
->nso_lock
);
/* Note that a ping is outstanding and remember its XID and send time. */
1025 nso
->nso_flags
|= NSO_PINGING
;
1026 nso
->nso_pingxid
= R_XID32(xid
);
1027 nso
->nso_reqtimestamp
= now
->tv_sec
;
1028 bzero(&msg
, sizeof(msg
));
/* Unconnected datagram sockets need the destination address supplied with each send. */
1029 if ((nso
->nso_sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
)) {
1030 msg
.msg_name
= nso
->nso_saddr
;
1031 msg
.msg_namelen
= nso
->nso_saddr
->sa_len
;
/* Total the request length over the mbuf chain so a short send can be detected below. */
1033 for (reqlen
= 0, m
= mreq
; m
; m
= mbuf_next(m
)) {
1034 reqlen
+= mbuf_len(m
);
/* Send the ping without holding the socket lock. */
1036 lck_mtx_unlock(&nso
->nso_lock
);
1037 NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq
);
1038 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreq
, 0, &sentlen
);
1039 NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
1040 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1041 lck_mtx_lock(&nso
->nso_lock
);
1042 if (!error
&& (sentlen
!= reqlen
)) {
/* Record the failure and mark this socket dead. */
1047 nso
->nso_error
= error
;
1048 nso
->nso_flags
|= NSO_DEAD
;
1056 * nfs_connect_search_socket_found: Take the found socket off the socket search list and assign it to the searched socket.
1057 * Set the nfs socket protocol and version if needed.
1060 nfs_connect_search_socket_found(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct nfs_socket
*nso
)
1062 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1063 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
/* nso_version == 0 means the default version answered the ping; record what that default was. */
1064 if (!nso
->nso_version
) {
1065 /* If the version isn't set, the default must have worked. */
1066 if (nso
->nso_protocol
== PMAPPROG
) {
1067 nso
->nso_version
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
1069 if (nso
->nso_protocol
== NFS_PROG
) {
1070 nso
->nso_version
= PVER2MAJOR(nmp
->nm_max_vers
);
/* Pull the verified socket off the search list and make it the search result. */
1073 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
1075 nss
->nss_sock
= nso
;
1079 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
1080 * the list. Dead sockets are then destroyed.
1083 nfs_connect_search_socket_reap(struct nfsmount
*nmp __unused
, struct nfs_socket_search
*nss
, struct timeval
*now
)
1085 struct nfs_socket
*nso
, *nsonext
;
1087 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
1088 lck_mtx_lock(&nso
->nso_lock
);
/* A socket that has been in the search longer than nss_timeo has timed out. */
1089 if (now
->tv_sec
>= (nso
->nso_timestamp
+ nss
->nss_timeo
)) {
1091 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
1092 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1093 nso
->nso_error
= ETIMEDOUT
;
1094 nso
->nso_flags
|= NSO_DEAD
;
/* Still-live sockets stay on the list. */
1096 if (!(nso
->nso_flags
& NSO_DEAD
)) {
1097 lck_mtx_unlock(&nso
->nso_lock
);
1100 lck_mtx_unlock(&nso
->nso_lock
);
1101 NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
1102 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, nso
->nso_error
, nso
->nso_flags
);
/* Fold this socket's error into the search result, unlink it, and destroy it. */
1103 nfs_socket_search_update_error(nss
, nso
->nso_error
);
1104 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
1106 nfs_socket_destroy(nso
);
1107 /* If there are more sockets to try, force the starting of another socket */
1108 if (nss
->nss_addrcnt
> 0) {
1115 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1118 nfs_connect_search_check(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
1122 /* log a warning if connect is taking a while */
1123 if (((now
->tv_sec
- nss
->nss_timestamp
) >= 8) && ((nss
->nss_flags
& (NSS_VERBOSE
| NSS_WARNED
)) == NSS_VERBOSE
)) {
1124 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1125 nss
->nss_flags
|= NSS_WARNED
;
/* Abort the search if the mount is being unmounted. */
1127 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
/* Bail out if the operation has been interrupted by a signal. */
1130 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 0))) {
1134 /* If we were successful at sending a ping, wait up to a second for a reply */
1135 if (nss
->nss_last
>= 0) {
1136 tsleep(nss
, PSOCK
, "nfs_connect_search_wait", hz
);
1144 * Continue the socket search until we have something to report.
1147 nfs_connect_search_loop(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
)
/* nmp: mount being connected; nss: in-progress socket search state. */
1149 struct nfs_socket
*nso
;
1152 int verbose
= (nss
->nss_flags
& NSS_VERBOSE
);
1156 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, now
.tv_sec
);
1158 /* add a new socket to the socket list if needed and available */
1159 error
= nfs_connect_search_new_socket(nmp
, nss
, &now
);
1161 NFS_SOCK_DBG("nfs connect returned %d\n", error
);
1165 /* check each active socket on the list and try to push it along */
1166 TAILQ_FOREACH(nso
, &nss
->nss_socklist
, nso_link
) {
1167 lck_mtx_lock(&nso
->nso_lock
);
1169 /* If not connected connect it */
1170 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
1171 if (!nfs_connect_search_socket_connect(nmp
, nso
, verbose
)) {
1172 lck_mtx_unlock(&nso
->nso_lock
);
1177 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
/* UDP pings are retransmitted if 2 seconds have passed since the last request. */
1178 if (!(nso
->nso_flags
& (NSO_PINGING
| NSO_VERIFIED
)) ||
1179 ((nso
->nso_sotype
== SOCK_DGRAM
) && (now
.tv_sec
>= nso
->nso_reqtimestamp
+ 2))) {
1180 if (!nfs_connect_search_ping(nmp
, nso
, &now
)) {
1181 lck_mtx_unlock(&nso
->nso_lock
);
1186 /* Has the socket been verified by the up call routine? */
1187 if (nso
->nso_flags
& NSO_VERIFIED
) {
1188 /* WOOHOO!! This socket looks good! */
1189 nfs_connect_search_socket_found(nmp
, nss
, nso
);
1190 lck_mtx_unlock(&nso
->nso_lock
);
1193 lck_mtx_unlock(&nso
->nso_lock
);
1196 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1197 nfs_connect_search_socket_reap(nmp
, nss
, &now
);
1200 * Keep looping if we haven't found a socket yet and we have more
1201 * sockets to (continue to) try.
1204 if (!nss
->nss_sock
&& (!TAILQ_EMPTY(&nss
->nss_socklist
) || nss
->nss_addrcnt
)) {
1205 error
= nfs_connect_search_check(nmp
, nss
, &now
);
1211 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1216 * Initialize a new NFS connection.
1218 * Search for a location to connect a socket to and initialize the connection.
1220 * An NFS mount may have multiple locations/servers/addresses available.
1221 * We attempt to connect to each one asynchronously and will start
1222 * several sockets in parallel if other locations are slow to answer.
1223 * We'll use the first NFS socket we can successfully set up.
1225 * The search may involve contacting the portmapper service first.
1227 * A mount's initial connection may require negotiating some parameters such
1228 * as socket type and NFS version.
1232 nfs_connect(struct nfsmount
*nmp
, int verbose
, int timeo
)
1234 struct nfs_socket_search nss
;
1235 struct nfs_socket
*nso
, *nsonfs
;
1236 struct sockaddr_storage ss
;
1237 struct sockaddr
*saddr
, *oldsaddr
;
1242 struct timeval start
;
1243 int error
, savederror
, nfsvers
;
1245 uint8_t sotype
= nmp
->nm_sotype
? nmp
->nm_sotype
: SOCK_STREAM
;
1246 fhandle_t
*fh
= NULL
;
1251 /* paranoia... check that we have at least one address in the locations */
1253 for (loc
= 0; loc
< nmp
->nm_locations
.nl_numlocs
; loc
++) {
1254 for (serv
= 0; serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
; serv
++) {
1255 addrtotal
+= nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
;
1256 if (nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
== 0) {
1257 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1258 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1259 nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_name
);
1264 if (addrtotal
== 0) {
1265 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1266 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1269 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1270 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, addrtotal
);
1273 lck_mtx_lock(&nmp
->nm_lock
);
1274 nmp
->nm_sockflags
|= NMSOCK_CONNECTING
;
1276 lck_mtx_unlock(&nmp
->nm_lock
);
1277 microuptime(&start
);
1278 savederror
= error
= 0;
1281 /* initialize socket search state */
1282 bzero(&nss
, sizeof(nss
));
1283 nss
.nss_addrcnt
= addrtotal
;
1284 nss
.nss_error
= savederror
;
1285 TAILQ_INIT(&nss
.nss_socklist
);
1286 nss
.nss_sotype
= sotype
;
1287 nss
.nss_startloc
= nmp
->nm_locations
.nl_current
;
1288 nss
.nss_timestamp
= start
.tv_sec
;
1289 nss
.nss_timeo
= timeo
;
1291 nss
.nss_flags
|= NSS_VERBOSE
;
1294 /* First time connecting, we may need to negotiate some things */
1295 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1296 NFS_SOCK_DBG("so_family = %d\n", nmp
->nm_sofamily
);
1297 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp
->nm_nfsport
, nmp
->nm_nfs_localport
? nmp
->nm_nfs_localport
: "");
1298 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp
->nm_mountport
, nmp
->nm_mount_localport
? nmp
->nm_mount_localport
: "");
1299 if (!nmp
->nm_vers
) {
1300 /* No NFS version specified... */
1301 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1303 if (PVER2MAJOR(nmp
->nm_max_vers
) >= NFS_VER4
&& tryv4
) {
1304 nss
.nss_port
= NFS_PORT
;
1305 nss
.nss_protocol
= NFS_PROG
;
1306 nss
.nss_version
= 4;
1307 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1310 /* ...connect to portmapper first if we (may) need any ports. */
1311 nss
.nss_port
= PMAPPORT
;
1312 nss
.nss_protocol
= PMAPPROG
;
1313 nss
.nss_version
= 0;
1318 /* ...connect to NFS port first. */
1319 nss
.nss_port
= nmp
->nm_nfsport
;
1320 nss
.nss_protocol
= NFS_PROG
;
1321 nss
.nss_version
= 0;
1324 } else if (nmp
->nm_vers
>= NFS_VER4
) {
1326 /* For NFSv4, we use the given (or default) port. */
1327 nss
.nss_port
= nmp
->nm_nfsport
? nmp
->nm_nfsport
: NFS_PORT
;
1328 nss
.nss_protocol
= NFS_PROG
;
1329 nss
.nss_version
= 4;
1331 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1332 * if no port is specified on the mount;
1333 * Note nm_vers is set so we will only try NFS_VER4.
1335 if (!nmp
->nm_nfsport
) {
1336 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1339 nss
.nss_port
= PMAPPORT
;
1340 nss
.nss_protocol
= PMAPPROG
;
1341 nss
.nss_version
= 0;
1345 /* For NFSv3/v2... */
1346 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1347 /* ...connect to portmapper first if we need any ports. */
1348 nss
.nss_port
= PMAPPORT
;
1349 nss
.nss_protocol
= PMAPPROG
;
1350 nss
.nss_version
= 0;
1352 /* ...connect to NFS port first. */
1353 nss
.nss_port
= nmp
->nm_nfsport
;
1354 nss
.nss_protocol
= NFS_PROG
;
1355 nss
.nss_version
= nmp
->nm_vers
;
1358 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1359 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1360 nss
.nss_protocol
, nss
.nss_version
);
1362 /* we've connected before, just connect to NFS port */
1363 if (!nmp
->nm_nfsport
) {
1364 /* need to ask portmapper which port that would be */
1365 nss
.nss_port
= PMAPPORT
;
1366 nss
.nss_protocol
= PMAPPROG
;
1367 nss
.nss_version
= 0;
1369 nss
.nss_port
= nmp
->nm_nfsport
;
1370 nss
.nss_protocol
= NFS_PROG
;
1371 nss
.nss_version
= nmp
->nm_vers
;
1373 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1374 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1375 nss
.nss_protocol
, nss
.nss_version
);
1378 /* Set next location to first valid location. */
1379 /* If start location is invalid, find next location. */
1380 nss
.nss_nextloc
= nss
.nss_startloc
;
1381 if ((nss
.nss_nextloc
.nli_serv
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servcount
) ||
1382 (nss
.nss_nextloc
.nli_addr
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servers
[nss
.nss_nextloc
.nli_serv
]->ns_addrcount
)) {
1383 nfs_location_next(&nmp
->nm_locations
, &nss
.nss_nextloc
);
1384 if (!nfs_location_index_cmp(&nss
.nss_nextloc
, &nss
.nss_startloc
)) {
1385 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1386 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1394 error
= nfs_connect_search_loop(nmp
, &nss
);
1395 if (error
|| !nss
.nss_sock
) {
1397 nfs_socket_search_cleanup(&nss
);
1398 if (nss
.nss_flags
& NSS_FALLBACK2PMAP
) {
1400 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1401 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1405 if (!error
&& (nss
.nss_sotype
== SOCK_STREAM
) && !nmp
->nm_sotype
&& (nmp
->nm_vers
< NFS_VER4
)) {
1407 sotype
= SOCK_DGRAM
;
1408 savederror
= nss
.nss_error
;
1409 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1410 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1414 error
= nss
.nss_error
? nss
.nss_error
: ETIMEDOUT
;
1416 lck_mtx_lock(&nmp
->nm_lock
);
1417 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1419 lck_mtx_unlock(&nmp
->nm_lock
);
1420 if (nss
.nss_flags
& NSS_WARNED
) {
1421 log(LOG_INFO
, "nfs_connect: socket connect aborted for %s\n",
1422 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1425 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1428 NFS_ZFREE(ZV_NAMEI
, path
);
1430 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1431 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1435 /* try to use nss_sock */
1437 nss
.nss_sock
= NULL
;
1439 /* We may be speaking to portmap first... to determine port(s). */
1440 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1441 port
= ntohs(((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
);
1442 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
1443 port
= ntohs(((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
);
1444 } else if (nso
->nso_saddr
->sa_family
== AF_LOCAL
) {
1445 if (nso
->nso_protocol
== PMAPPROG
) {
1450 if (port
== PMAPPORT
) {
1451 /* Use this portmapper port to get the port #s we need. */
1452 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1453 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1455 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1456 sock_setupcall(nso
->nso_so
, NULL
, NULL
);
1458 /* Set up socket address and port for NFS socket. */
1459 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1461 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1462 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: PVER2MAJOR(nmp
->nm_max_vers
);
1463 if (!(port
= nmp
->nm_nfsport
)) {
1464 if (ss
.ss_family
== AF_INET
) {
1465 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
1466 } else if (ss
.ss_family
== AF_INET6
) {
1467 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
1468 } else if (ss
.ss_family
== AF_LOCAL
) {
1469 if (((struct sockaddr_un
*)&ss
)->sun_path
[0] == '/') {
1470 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un
*)&ss
)->sun_path
);
1473 for (; nfsvers
>= (int)PVER2MAJOR(nmp
->nm_min_vers
); nfsvers
--) {
1474 if (nmp
->nm_vers
&& nmp
->nm_vers
!= nfsvers
) {
1475 continue; /* Wrong version */
1478 if (nfsvers
== NFS_VER4
&& nso
->nso_sotype
== SOCK_DGRAM
) {
1479 continue; /* NFSv4 does not do UDP */
1482 if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_nfs_localport
) {
1483 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
1484 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp
->nm_nfs_localport
);
1485 strlcpy(sun
->sun_path
, nmp
->nm_nfs_localport
, sizeof(sun
->sun_path
));
1488 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1489 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1490 nso
->nso_so
, NFS_PROG
, nfsvers
, nso
->nso_sotype
, timeo
);
1493 if (ss
.ss_family
== AF_INET
) {
1494 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1495 } else if (ss
.ss_family
== AF_INET6
) {
1496 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1497 } else if (ss
.ss_family
== AF_LOCAL
) {
1498 port
= ((struct sockaddr_un
*)&ss
)->sun_path
[0] ? NFS_PORT
: 0;
1501 error
= EPROGUNAVAIL
;
1504 if (port
== NFS_PORT
&& nfsvers
== NFS_VER4
&& tryv4
== 0) {
1505 continue; /* We already tried this */
1513 if (nfsvers
< (int)PVER2MAJOR(nmp
->nm_min_vers
) && error
== 0) {
1514 error
= EPROGUNAVAIL
;
1517 nfs_socket_search_update_error(&nss
, error
);
1518 nfs_socket_destroy(nso
);
1519 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers
, error
);
1522 } else if (nmp
->nm_nfs_localport
) {
1523 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_nfs_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1524 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp
->nm_nfs_localport
);
1527 /* Create NFS protocol socket and add it to the list of sockets. */
1528 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1529 if (ss
.ss_family
== AF_LOCAL
) {
1530 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un
*)&ss
)->sun_path
, port
);
1532 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nso
->nso_sotype
, port
,
1533 NFS_PROG
, nfsvers
, NMFLAG(nmp
, RESVPORT
), &nsonfs
);
1535 nfs_socket_search_update_error(&nss
, error
);
1536 nfs_socket_destroy(nso
);
1537 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error
);
1540 nsonfs
->nso_location
= nso
->nso_location
;
1541 nsonfs
->nso_wake
= &nss
;
1542 error
= sock_setupcall(nsonfs
->nso_so
, nfs_connect_upcall
, nsonfs
);
1544 nfs_socket_search_update_error(&nss
, error
);
1545 nfs_socket_destroy(nsonfs
);
1546 nfs_socket_destroy(nso
);
1547 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error
);
1550 TAILQ_INSERT_TAIL(&nss
.nss_socklist
, nsonfs
, nso_link
);
1552 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1553 /* Set up socket address and port for MOUNT socket. */
1555 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1556 port
= nmp
->nm_mountport
;
1557 NFS_SOCK_DBG("mount port = %d\n", port
);
1558 if (ss
.ss_family
== AF_INET
) {
1559 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1560 } else if (ss
.ss_family
== AF_INET6
) {
1561 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1562 } else if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_mount_localport
) {
1563 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp
->nm_mount_localport
, nmp
->nm_mountport
);
1564 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_mount_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1567 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1568 /* If NFS version is unknown, optimistically choose for NFSv3. */
1569 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1570 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1571 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso
->nso_so
);
1572 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1573 nso
->nso_so
, RPCPROG_MNT
, mntvers
, mntproto
== IPPROTO_UDP
? SOCK_DGRAM
: SOCK_STREAM
, timeo
);
1576 if (ss
.ss_family
== AF_INET
) {
1577 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1578 } else if (ss
.ss_family
== AF_INET6
) {
1579 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1580 } else if (ss
.ss_family
== AF_LOCAL
) {
1581 port
= (((struct sockaddr_un
*)&ss
)->sun_path
[0] != '\0');
1584 error
= EPROGUNAVAIL
;
1587 /* create sockaddr for MOUNT */
1589 MALLOC(nsonfs
->nso_saddr2
, struct sockaddr
*, ss
.ss_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
1591 if (!error
&& !nsonfs
->nso_saddr2
) {
1595 bcopy(&ss
, nsonfs
->nso_saddr2
, ss
.ss_len
);
1598 NFS_SOCK_DBG("Could not create mount sockaet address %d", error
);
1599 lck_mtx_lock(&nsonfs
->nso_lock
);
1600 nsonfs
->nso_error
= error
;
1601 nsonfs
->nso_flags
|= NSO_DEAD
;
1602 lck_mtx_unlock(&nsonfs
->nso_lock
);
1605 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso
, nso
->nso_so
);
1606 nfs_socket_destroy(nso
);
1610 /* nso is an NFS socket */
1611 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1613 /* If NFS version wasn't specified, it was determined during the connect. */
1614 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: (int)nso
->nso_version
;
1616 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1617 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1619 saddr
= nso
->nso_saddr2
;
1621 /* Need sockaddr for MOUNT port */
1622 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp
->nm_mountport
, nmp
->nm_mount_localport
);
1623 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1624 port
= nmp
->nm_mountport
;
1625 if (ss
.ss_family
== AF_INET
) {
1626 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1627 } else if (ss
.ss_family
== AF_INET6
) {
1628 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1629 } else if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_mount_localport
) {
1630 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp
->nm_mount_localport
, nmp
->nm_mountport
);
1631 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_mount_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1634 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1635 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1636 int so_type
= NM_OMFLAG(nmp
, MNTUDP
) ? SOCK_DGRAM
: nso
->nso_sotype
;
1637 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1638 NULL
, RPCPROG_MNT
, mntvers
, so_type
, timeo
);
1639 if (ss
.ss_family
== AF_INET
) {
1640 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1641 } else if (ss
.ss_family
== AF_INET6
) {
1642 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1647 saddr
= (struct sockaddr
*)&ss
;
1649 error
= EPROGUNAVAIL
;
1654 error
= nfs3_check_lockmode(nmp
, saddr
, nso
->nso_sotype
, timeo
);
1656 nfs_socket_search_update_error(&nss
, error
);
1657 nfs_socket_destroy(nso
);
1662 fh
= zalloc(nfs_fhandle_zone
);
1665 path
= zalloc(ZV_NAMEI
);
1667 if (!saddr
|| !fh
|| !path
) {
1672 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1675 NFS_ZFREE(ZV_NAMEI
, path
);
1677 nfs_socket_search_update_error(&nss
, error
);
1678 nfs_socket_destroy(nso
);
1681 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
, path
, MAXPATHLEN
, 1);
1682 error
= nfs3_mount_rpc(nmp
, saddr
, nso
->nso_sotype
, nfsvers
,
1683 path
, vfs_context_current(), timeo
, fh
, &nmp
->nm_servsec
);
1684 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1685 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1687 /* Make sure we can agree on a security flavor. */
1688 int o
, s
; /* indices into mount option and server security flavor lists */
1691 if ((nfsvers
== NFS_VER3
) && !nmp
->nm_servsec
.count
) {
1692 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1693 nmp
->nm_servsec
.count
= 1;
1694 nmp
->nm_servsec
.flavors
[0] = RPCAUTH_SYS
;
1696 if (nmp
->nm_sec
.count
) {
1697 /* Choose the first flavor in our list that the server supports. */
1698 if (!nmp
->nm_servsec
.count
) {
1699 /* we don't know what the server supports, just use our first choice */
1700 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[0];
1703 for (o
= 0; !found
&& (o
< nmp
->nm_sec
.count
); o
++) {
1704 for (s
= 0; !found
&& (s
< nmp
->nm_servsec
.count
); s
++) {
1705 if (nmp
->nm_sec
.flavors
[o
] == nmp
->nm_servsec
.flavors
[s
]) {
1706 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[o
];
1712 /* Choose the first one we support from the server's list. */
1713 if (!nmp
->nm_servsec
.count
) {
1714 nmp
->nm_auth
= RPCAUTH_SYS
;
1717 for (s
= 0; s
< nmp
->nm_servsec
.count
; s
++) {
1718 switch (nmp
->nm_servsec
.flavors
[s
]) {
1720 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1721 if (found
&& (nmp
->nm_auth
== RPCAUTH_NONE
)) {
1730 nmp
->nm_auth
= nmp
->nm_servsec
.flavors
[s
];
1737 error
= !found
? EAUTH
: 0;
1739 NFS_ZFREE(ZV_NAMEI
, path
);
1741 nfs_socket_search_update_error(&nss
, error
);
1742 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1743 nfs_socket_destroy(nso
);
1747 NFS_ZFREE(nfs_fhandle_zone
, nmp
->nm_fh
);
1751 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_CALLUMNT
);
1754 /* put the real upcall in place */
1755 upcall
= (nso
->nso_sotype
== SOCK_STREAM
) ? nfs_tcp_rcv
: nfs_udp_rcv
;
1756 error
= sock_setupcall(nso
->nso_so
, upcall
, nmp
);
1758 nfs_socket_search_update_error(&nss
, error
);
1759 nfs_socket_destroy(nso
);
1763 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1764 /* set mntfromname to this location */
1765 if (!NM_OMATTR_GIVEN(nmp
, MNTFROM
)) {
1766 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
,
1767 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1768 sizeof(vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
), 0);
1770 /* some negotiated values need to remain unchanged for the life of the mount */
1771 if (!nmp
->nm_sotype
) {
1772 nmp
->nm_sotype
= nso
->nso_sotype
;
1774 if (!nmp
->nm_vers
) {
1775 nmp
->nm_vers
= nfsvers
;
1777 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1778 if ((nfsvers
>= NFS_VER4
) && !NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1779 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1780 port
= ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
1781 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
1782 port
= ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
1786 if (port
== NFS_PORT
) {
1787 nmp
->nm_nfsport
= NFS_PORT
;
1793 /* do some version-specific pre-mount set up */
1794 if (nmp
->nm_vers
>= NFS_VER4
) {
1796 nmp
->nm_mounttime
= ((uint64_t)now
.tv_sec
<< 32) | now
.tv_usec
;
1797 if (!NMFLAG(nmp
, NOCALLBACK
)) {
1798 nfs4_mount_callback_setup(nmp
);
1804 /* Initialize NFS socket state variables */
1805 lck_mtx_lock(&nmp
->nm_lock
);
1806 nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] =
1807 nmp
->nm_srtt
[3] = (NFS_TIMEO
<< 3);
1808 nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
1809 nmp
->nm_sdrtt
[3] = 0;
1810 if (nso
->nso_sotype
== SOCK_DGRAM
) {
1811 nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
1813 } else if (nso
->nso_sotype
== SOCK_STREAM
) {
1814 nmp
->nm_timeouts
= 0;
1816 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1817 nmp
->nm_sockflags
|= NMSOCK_SETUP
;
1818 /* move the socket to the mount structure */
1820 oldsaddr
= nmp
->nm_saddr
;
1821 nmp
->nm_saddr
= nso
->nso_saddr
;
1822 lck_mtx_unlock(&nmp
->nm_lock
);
1823 error
= nfs_connect_setup(nmp
);
1824 lck_mtx_lock(&nmp
->nm_lock
);
1825 nmp
->nm_sockflags
&= ~NMSOCK_SETUP
;
1827 nmp
->nm_sockflags
|= NMSOCK_READY
;
1828 wakeup(&nmp
->nm_sockflags
);
1831 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1832 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1833 nfs_socket_search_update_error(&nss
, error
);
1834 nmp
->nm_saddr
= oldsaddr
;
1835 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1836 /* undo settings made prior to setup */
1837 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_SOCKET_TYPE
)) {
1841 if (nmp
->nm_vers
>= NFS_VER4
) {
1842 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1843 nmp
->nm_nfsport
= 0;
1846 nfs4_mount_callback_shutdown(nmp
);
1848 if (IS_VALID_CRED(nmp
->nm_mcred
)) {
1849 kauth_cred_unref(&nmp
->nm_mcred
);
1851 bzero(&nmp
->nm_un
, sizeof(nmp
->nm_un
));
1856 lck_mtx_unlock(&nmp
->nm_lock
);
1858 nfs_socket_destroy(nso
);
1862 /* update current location */
1863 if ((nmp
->nm_locations
.nl_current
.nli_flags
& NLI_VALID
) &&
1864 (nmp
->nm_locations
.nl_current
.nli_serv
!= nso
->nso_location
.nli_serv
)) {
1865 /* server has changed, we should initiate failover/recovery */
1868 nmp
->nm_locations
.nl_current
= nso
->nso_location
;
1869 nmp
->nm_locations
.nl_current
.nli_flags
|= NLI_VALID
;
1871 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1872 /* We have now successfully connected... make a note of it. */
1873 nmp
->nm_sockflags
|= NMSOCK_HASCONNECTED
;
1876 lck_mtx_unlock(&nmp
->nm_lock
);
1878 FREE(oldsaddr
, M_SONAME
);
1881 if (nss
.nss_flags
& NSS_WARNED
) {
1882 log(LOG_INFO
, "nfs_connect: socket connect completed for %s\n",
1883 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1887 nfs_socket_search_cleanup(&nss
);
1889 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1892 NFS_ZFREE(ZV_NAMEI
, path
);
1894 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1899 /* setup & confirm socket connection is functional */
/* For NFSv4 mounts: renew any existing client state; lost/stale/revoked state triggers recovery. */
1905 struct nfsmount
*nmp
)
1909 if (nmp
->nm_vers
>= NFS_VER4
) {
1910 if (nmp
->nm_state
& NFSSTA_CLIENTID
) {
1911 /* first, try to renew our current state */
1912 error
= nfs4_renew(nmp
, R_SETUP
);
/* These errors mean the server no longer honors our state — start recovery. */
1913 if ((error
== NFSERR_ADMIN_REVOKED
) ||
1914 (error
== NFSERR_CB_PATH_DOWN
) ||
1915 (error
== NFSERR_EXPIRED
) ||
1916 (error
== NFSERR_LEASE_MOVED
) ||
1917 (error
== NFSERR_STALE_CLIENTID
)) {
1918 lck_mtx_lock(&nmp
->nm_lock
);
1919 nfs_need_recover(nmp
, error
);
1920 lck_mtx_unlock(&nmp
->nm_lock
);
/* Establish a client id with the server. */
1923 error
= nfs4_setclientid(nmp
);
1930 * NFS socket reconnect routine:
1931 * Called when a connection is broken.
1932 * - disconnect the old socket
1933 * - nfs_connect() again
1934 * - set R_MUSTRESEND for all outstanding requests on mount point
1935 * If this fails the mount point is DEAD!
1938 nfs_reconnect(struct nfsmount
*nmp
)
1942 thread_t thd
= current_thread();
1943 int error
, wentdown
= 0, verbose
= 1;
/* Arrange for the first "can not connect" message after the initial tprintf delay. */
1948 lastmsg
= now
.tv_sec
- (nmp
->nm_tprintf_delay
- nmp
->nm_tprintf_initial_delay
);
1950 nfs_disconnect(nmp
);
1953 lck_mtx_lock(&nmp
->nm_lock
);
/* Shorter connect timeout for "squishy" mounts (see nfs_is_squishy()). */
1954 timeo
= nfs_is_squishy(nmp
) ? 8 : 30;
1955 lck_mtx_unlock(&nmp
->nm_lock
);
/* Keep retrying the connect until it succeeds; signals and dead mounts break out below. */
1957 while ((error
= nfs_connect(nmp
, verbose
, timeo
))) {
1959 nfs_disconnect(nmp
);
1960 if ((error
== EINTR
) || (error
== ERESTART
)) {
/* Rate-limit the "can not connect" notification to nm_tprintf_delay. */
1967 if ((lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
) {
1968 lastmsg
= now
.tv_sec
;
1969 nfs_down(nmp
, thd
, error
, NFSSTA_TIMEO
, "can not connect", 0);
1972 lck_mtx_lock(&nmp
->nm_lock
);
1973 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1974 /* we're not yet completely mounted and */
1975 /* we can't reconnect, so we fail */
1976 lck_mtx_unlock(&nmp
->nm_lock
);
1977 NFS_SOCK_DBG("Not mounted returning %d\n", error
);
/* Mount has exceeded its dead timeout: make it a zombie and give up. */
1981 if (nfs_mount_check_dead_timeout(nmp
)) {
1982 nfs_mount_make_zombie(nmp
);
1983 lck_mtx_unlock(&nmp
->nm_lock
);
1987 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1988 lck_mtx_unlock(&nmp
->nm_lock
);
1991 lck_mtx_unlock(&nmp
->nm_lock
);
/* Pause briefly between reconnect attempts. */
1992 tsleep(nfs_reconnect
, PSOCK
, "nfs_reconnect_delay", 2 * hz
);
1993 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1999 nfs_up(nmp
, thd
, NFSSTA_TIMEO
, "connected");
2003 * Loop through outstanding request list and mark all requests
2004 * as needing a resend. (Though nfs_need_reconnect() probably
2005 * marked them all already.)
2007 lck_mtx_lock(&nfs_request_mutex
);
2008 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
2009 if (rq
->r_nmp
== nmp
) {
2010 lck_mtx_lock(&rq
->r_mtx
);
/* Only requests without an error or a reply in hand need a resend. */
2011 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
2012 rq
->r_flags
|= R_MUSTRESEND
;
/* Async requests not owned by an iod and not currently sending go to the resend queue. */
2015 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
2016 nfs_asyncio_resend(rq
);
2019 lck_mtx_unlock(&rq
->r_mtx
);
2022 lck_mtx_unlock(&nfs_request_mutex
);
2027 * NFS disconnect. Clean up and unlink.
2030 nfs_disconnect(struct nfsmount
*nmp
)
2032 struct nfs_socket
*nso
;
2034 lck_mtx_lock(&nmp
->nm_lock
);
2037 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
2038 if (nmp
->nm_state
& NFSSTA_SENDING
) { /* wait for sending to complete */
2039 nmp
->nm_state
|= NFSSTA_WANTSND
;
2040 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitsending", &ts
);
2043 if (nmp
->nm_sockflags
& NMSOCK_POKE
) { /* wait for poking to complete */
2044 msleep(&nmp
->nm_sockflags
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitpoke", &ts
);
/* Mark the socket as going away before tearing it down. */
2047 nmp
->nm_sockflags
|= NMSOCK_DISCONNECTING
;
2048 nmp
->nm_sockflags
&= ~NMSOCK_READY
;
/* The socket may share its sockaddr with the mount; detach it so the destroy below doesn't free nm_saddr. */
2051 if (nso
->nso_saddr
== nmp
->nm_saddr
) {
2052 nso
->nso_saddr
= NULL
;
/* Destroy the socket without holding the mount lock. */
2054 lck_mtx_unlock(&nmp
->nm_lock
);
2055 nfs_socket_destroy(nso
);
2056 lck_mtx_lock(&nmp
->nm_lock
);
2057 nmp
->nm_sockflags
&= ~NMSOCK_DISCONNECTING
;
2058 lck_mtx_unlock(&nmp
->nm_lock
);
2060 lck_mtx_unlock(&nmp
->nm_lock
);
2065 * mark an NFS mount as needing a reconnect/resends.
2068 nfs_need_reconnect(struct nfsmount
*nmp
)
/* Knock the socket out of READY/SETUP state so the connection gets re-established. */
2072 lck_mtx_lock(&nmp
->nm_lock
);
2073 nmp
->nm_sockflags
&= ~(NMSOCK_READY
| NMSOCK_SETUP
);
2074 lck_mtx_unlock(&nmp
->nm_lock
);
2077 * Loop through outstanding request list and
2078 * mark all requests as needing a resend.
2080 lck_mtx_lock(&nfs_request_mutex
);
2081 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
2082 if (rq
->r_nmp
== nmp
) {
2083 lck_mtx_lock(&rq
->r_mtx
);
/* Only requests without an error or a reply in hand need a resend. */
2084 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
2085 rq
->r_flags
|= R_MUSTRESEND
;
/* Async requests not owned by an iod and not currently sending go to the resend queue. */
2088 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
2089 nfs_asyncio_resend(rq
);
2092 lck_mtx_unlock(&rq
->r_mtx
);
2095 lck_mtx_unlock(&nfs_request_mutex
);
/*
 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
 *
 * Per-mount worker thread. Loops (under nm_lock) while there is work:
 * socket not ready, pending resends, monitored nodes, dead-timeout tracking,
 * state recovery, or (v4) queued delegation returns. On exit it may send the
 * NFSv3 unmount RPC and then terminates itself.
 *
 * NOTE(review): a substantial number of original lines (braces, returns,
 * declarations for 'now'/'lvl'/'np'/'req', the #if CONFIG_NFS_GSS opener,
 * and various closers) are missing from this chunk; code tokens below are
 * preserved exactly as found.
 */
nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
	struct nfsmount *nmp = arg;
	struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
	thread_t thd = current_thread();
	int error, dofinish;
	int do_reconnect_sleep = 0;

	lck_mtx_lock(&nmp->nm_lock);
	/* keep working while any of these conditions need servicing */
	while (!(nmp->nm_sockflags & NMSOCK_READY) ||
	    !TAILQ_EMPTY(&nmp->nm_resendq) ||
	    !LIST_EMPTY(&nmp->nm_monlist) ||
	    nmp->nm_deadto_start ||
	    (nmp->nm_state & NFSSTA_RECOVER) ||
	    ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
		/* do reconnect, if necessary */
		if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			if (nmp->nm_reconnect_start <= 0) {
				/* record when this reconnect attempt cycle began */
				nmp->nm_reconnect_start = now.tv_sec;
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			/*
			 * XXX We don't want to call reconnect again right away if returned errors
			 * before that may not have blocked. This has caused spamming null procs
			 * from machines in the pass.
			 */
			if (do_reconnect_sleep) {
				/* throttle back-to-back failing reconnects by ~1 tick second */
				tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
			error = nfs_reconnect(nmp);
			if (error == EIO || error == EINTR) {
				/* log at debug level 7 most of the time, level 0 every 600th try */
				lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
				NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
			nmp->nm_reconnect_start = 0;
			do_reconnect_sleep = 0;
			lck_mtx_lock(&nmp->nm_lock);
		if ((nmp->nm_sockflags & NMSOCK_READY) &&
		    (nmp->nm_state & NFSSTA_RECOVER) &&
		    !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
		    !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			/* perform state recovery */
			lck_mtx_unlock(&nmp->nm_lock);
			/* NOTE(review): the recovery call between these lock ops is missing here */
			lck_mtx_lock(&nmp->nm_lock);
		/* handle NFSv4 delegation returns */
		while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
		    (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
		    ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
			lck_mtx_unlock(&nmp->nm_lock);
			nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
			lck_mtx_lock(&nmp->nm_lock);
		/* do resends, if necessary/possible */
		while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
		    (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
		    ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
			if (req->r_resendtime) {
				/* skip requests whose resend time hasn't arrived yet */
				while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
					req = TAILQ_NEXT(req, r_rchain);
			/* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_lock(&req->r_mtx);
			lck_mtx_lock(&nmp->nm_lock);
			/* request may have left the resend queue while we juggled locks */
			if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
				lck_mtx_unlock(&req->r_mtx);
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_flags &= ~R_RESENDQ;
			req->r_rchain.tqe_next = NFSREQNOLIST;
			lck_mtx_unlock(&nmp->nm_lock);
			/* Note that we have a reference on the request that was taken nfs_asyncio_resend */
			if (req->r_error || req->r_nmrep.nmc_mhead) {
				/* already errored/answered: just finish it off */
				dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				lck_mtx_unlock(&req->r_mtx);
				nfs_asyncio_finish(req);
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
			if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
				req->r_flags &= ~R_RESTART;
				req->r_resendtime = 0;
				lck_mtx_unlock(&req->r_mtx);
				/* async RPCs on GSS mounts need to be rebuilt and resent. */
				nfs_reqdequeue(req);
				/* NOTE(review): the #if CONFIG_NFS_GSS opener for this section is missing */
				if (nfs_request_using_gss(req)) {
					nfs_gss_clnt_rpcdone(req);
					error = nfs_gss_clnt_args_restore(req);
					if (error == ENEEDAUTH) {
#endif /* CONFIG_NFS_GSS */
				NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
				    nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
				    req->r_flags, req->r_rtt);
				error = nfs_sigintr(nmp, req, req->r_thread, 0);
				error = nfs_request_add_header(req);
				error = nfs_request_send(req, 0);
				lck_mtx_lock(&req->r_mtx);
				req->r_error = error;
				dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				lck_mtx_unlock(&req->r_mtx);
				nfs_asyncio_finish(req);
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
			NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			error = nfs_sigintr(nmp, req, req->r_thread, 0);
			req->r_flags |= R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			error = nfs_send(req, 0);
			lck_mtx_lock(&req->r_mtx);
			lck_mtx_unlock(&req->r_mtx);
			nfs_request_rele(req);
			lck_mtx_lock(&nmp->nm_lock);
			req->r_error = error;
			dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
			lck_mtx_unlock(&req->r_mtx);
			nfs_asyncio_finish(req);
			nfs_request_rele(req);
			lck_mtx_lock(&nmp->nm_lock);
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
		if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
		/* check monitored nodes, if necessary/possible */
		if (!LIST_EMPTY(&nmp->nm_monlist)) {
			nmp->nm_state |= NFSSTA_MONITOR_SCAN;
			LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
				/* stop scanning if the socket went down or state changed */
				if (!(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
				np->n_mflag |= NMMONSCANINPROG;
				lck_mtx_unlock(&nmp->nm_lock);
				error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
				if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
					nfs_data_update_size(np, 0);
				lck_mtx_lock(&nmp->nm_lock);
				np->n_mflag &= ~NMMONSCANINPROG;
				if (np->n_mflag & NMMONSCANWANT) {
					np->n_mflag &= ~NMMONSCANWANT;
					wakeup(&np->n_mflag);
				if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
			nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
			if (nmp->nm_state & NFSSTA_UNMOUNTING) {
				wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
		if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
			if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
			    (nmp->nm_state & NFSSTA_RECOVER)) {
			/* nothing urgent: sleep up to 30s or until woken */
			msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
	/* If we're unmounting, send the unmount RPC, if requested/appropriate. */
	if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
	    (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
	    (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		lck_mtx_unlock(&nmp->nm_lock);
		/* retry count: 6 if the socket is up, else only 2 */
		nfs3_umount_rpc(nmp, vfs_context_kernel(),
		    (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
		lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_sockthd == thd) {
		nmp->nm_sockthd = NULL;
	lck_mtx_unlock(&nmp->nm_lock);
	wakeup(&nmp->nm_sockthd);
	thread_terminate(thd);
/* start or wake a mount's socket thread */
/*
 * If the mount already has a socket thread, just wake it; otherwise spawn
 * one running nfs_mount_sock_thread() and drop the creation reference
 * (the thread terminates itself).
 *
 * NOTE(review): braces/closers from the original are missing in this chunk;
 * code tokens are preserved exactly as found.
 */
nfs_mount_sock_thread_wake(struct nfsmount *nmp)
	if (nmp->nm_sockthd) {
		wakeup(&nmp->nm_sockthd);
	} else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
		thread_deallocate(nmp->nm_sockthd);
/*
 * Check if we should mark the mount dead because the
 * unresponsive mount has reached the dead timeout.
 * (must be called with nmp locked)
 *
 * NOTE(review): the return statements and the declaration of 'now' are
 * among the original lines missing from this chunk; visible tokens are
 * preserved exactly as found. The visible checks: already dead, timer not
 * started, no dead timeout configured, timeout not yet elapsed.
 */
nfs_mount_check_dead_timeout(struct nfsmount *nmp)
	if (nmp->nm_state & NFSSTA_DEAD) {
	if (nmp->nm_deadto_start == 0) {
	/* re-evaluate "squishy" status, which can adjust nm_curdeadtimeout */
	nfs_is_squishy(nmp);
	if (nmp->nm_curdeadtimeout <= 0) {
	if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
/*
 * Call nfs_mount_zombie to remove most of the
 * nfs state for the mount, and then ask to be forcibly unmounted.
 *
 * Assumes the nfs mount structure lock nm_lock is held.
 *
 * NOTE(review): braces/returns and the 'fsid' declaration are among the
 * original lines missing from this chunk; tokens preserved as found.
 */
nfs_mount_make_zombie(struct nfsmount *nmp)
	if (nmp->nm_state & NFSSTA_DEAD) {
	/* "squished" when the current dead timeout was shortened from the configured one */
	printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
	    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
	fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
	/* drop nm_lock across the zombie/teardown work, reacquire before return */
	lck_mtx_unlock(&nmp->nm_lock);
	nfs_mount_zombie(nmp, NFSSTA_DEAD);
	vfs_event_signal(&fsid, VQ_DEAD, 0);
	lck_mtx_lock(&nmp->nm_lock);
/*
 * NFS callback channel socket state
 *
 * One entry per accepted callback connection from an NFSv4 server.
 * NOTE(review): the closing "};" of this struct is among the lines missing
 * from this chunk; members/tokens preserved exactly as found.
 */
struct nfs_callback_socket {
	TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
	socket_t ncbs_so;                       /* the socket */
	struct sockaddr_storage ncbs_saddr;     /* socket address */
	struct nfs_rpc_record_state ncbs_rrs;   /* RPC record parsing state */
	time_t ncbs_stamp;                      /* last accessed at */
	uint32_t ncbs_flags;                    /* see below */

/* ncbs_flags values */
#define NCBSOCK_UPCALL          0x0001  /* upcall currently processing this socket */
#define NCBSOCK_UPCALLWANT      0x0002  /* another upcall is waiting its turn */
#define NCBSOCK_DEAD            0x0004  /* marked for reaping by the cleanup timer */
/*
 * NFS callback channel state
 *
 * One listening socket for accepting socket connections from servers and
 * a list of connected sockets to handle callback requests on.
 * Mounts registered with the callback channel are assigned IDs and
 * put on a list so that the callback request handling code can match
 * the requests up with mounts.
 */
socket_t nfs4_cb_so = NULL;             /* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL;            /* IPv6 listening socket */
in_port_t nfs4_cb_port = 0;             /* bound IPv4 callback port */
in_port_t nfs4_cb_port6 = 0;            /* bound IPv6 callback port */
uint32_t nfs4_cb_id = 0;                /* next callback ID to assign */
uint32_t nfs4_cb_so_usecount = 0;       /* mounts using the callback sockets */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
/*
 * Set up the callback channel for the NFS mount.
 *
 * Initializes the callback channel socket state and
 * assigns a callback ID to the mount.
 *
 * Creates the shared IPv4 and IPv6 listening sockets on first use
 * (under nfs_global_mutex) and registers the mount on nfs4_cb_mounts.
 *
 * NOTE(review): many original lines are missing from this chunk (return
 * type, braces, error-check "if (error)" wrappers around the logs, timeo
 * initialization, the ipv6_bind_again label, and the failure-path cleanup
 * structure); visible tokens are preserved exactly as found.
 */
nfs4_mount_callback_setup(struct nfsmount *nmp)
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	socket_t so6 = NULL;
	struct timeval timeo;

	lck_mtx_lock(&nfs_global_mutex);
	if (nfs4_cb_id == 0) {
		/* first-ever use: initialize the global lists */
		TAILQ_INIT(&nfs4_cb_mounts);
		TAILQ_INIT(&nfs4_cb_socks);
	nmp->nm_cbid = nfs4_cb_id++;
	if (nmp->nm_cbid == 0) {
		/* skip ID 0 on wraparound — 0 means "no callback ID" */
		nmp->nm_cbid = nfs4_cb_id++;
	nfs4_cb_so_usecount++;
	TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
	lck_mtx_unlock(&nfs_global_mutex);

	/* ---- IPv4 listening socket ---- */
	error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
	log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
	if (NFS_PORT_INVALID(nfs_callback_port)) {
		log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
	sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
	error = sock_bind(so, (struct sockaddr *)&sin);
	log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
	error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
	log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
	nfs4_cb_port = ntohs(sin.sin_port);
	error = sock_listen(so, 32);
	log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
	sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	/* ---- IPv6 listening socket ---- */
	error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
	log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
	sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	/* try to use specified port or same port as IPv4 */
	port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = in6addr_any;
	sin6.sin6_port = htons(port);
	error = sock_bind(so6, (struct sockaddr *)&sin6);
	if (port != nfs_callback_port) {
		/* if we simply tried to match the IPv4 port, then try any port */
		/* NOTE(review): port reassignment and label lines missing here */
		goto ipv6_bind_again;
	log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
	error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
	log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
	nfs4_cb_port6 = ntohs(sin6.sin6_port);
	error = sock_listen(so6, 32);
	log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
	error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
	sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	/* failure path: clear the globals and shut down whatever got created */
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	lck_mtx_unlock(&nfs_global_mutex);
	sock_shutdown(so, SHUT_RDWR);
	sock_shutdown(so6, SHUT_RDWR);
	lck_mtx_unlock(&nfs_global_mutex);
/*
 * Shut down the callback channel for the NFS mount.
 *
 * Clears the mount's callback ID and releases the mounts
 * reference on the callback socket. Last reference dropped
 * will also shut down the callback socket(s).
 *
 * NOTE(review): some lines (braces, returns, the nm_cbid clear, and the
 * so/so6 captures of the listening sockets) are missing from this chunk;
 * visible tokens are preserved exactly as found.
 */
nfs4_mount_callback_shutdown(struct nfsmount *nmp)
	struct nfs_callback_socket *ncbsp;
	struct nfs4_cb_sock_list cb_socks;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	lck_mtx_lock(&nfs_global_mutex);
	if (nmp->nm_cbid == 0) {
		/* never registered — nothing to do */
		lck_mtx_unlock(&nfs_global_mutex);
	TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
	/* wait for any callbacks in progress to complete */
	while (nmp->nm_cbrefs) {
		msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
	if (--nfs4_cb_so_usecount) {
		/* other mounts still using the callback sockets; keep them */
		lck_mtx_unlock(&nfs_global_mutex);
	/* last user: take ownership of the sockets and the connection list */
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	TAILQ_INIT(&cb_socks);
	TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
	lck_mtx_unlock(&nfs_global_mutex);
	sock_shutdown(so, SHUT_RDWR);
	sock_shutdown(so6, SHUT_RDWR);
	/* tear down every accepted callback connection */
	while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
		TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
/*
 * Check periodically for stale/unused nfs callback sockets
 */
#define NFS4_CB_TIMER_PERIOD    30      /* seconds between sweeps */
#define NFS4_CB_IDLE_MAX        300     /* seconds before an idle socket is reaped */
/*
 * Timer callback: reap callback sockets that are marked dead or idle too
 * long; reschedule itself while any sockets remain.
 *
 * NOTE(review): some lines (braces, the declaration/fetch of 'now', and the
 * loop-restart after reaping) are missing from this chunk; visible tokens
 * are preserved exactly as found.
 */
nfs4_callback_timer(__unused void *param0, __unused void *param1)
	struct nfs_callback_socket *ncbsp, *nextncbsp;

	lck_mtx_lock(&nfs_global_mutex);
	if (TAILQ_EMPTY(&nfs4_cb_socks)) {
		/* no sockets left — stop the timer */
		nfs4_callback_timer_on = 0;
		lck_mtx_unlock(&nfs_global_mutex);
	TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
		/* keep sockets that are alive and recently used */
		if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
		    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
		TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
		/* close/free outside the global mutex */
		lck_mtx_unlock(&nfs_global_mutex);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
	/* re-arm the sweep */
	nfs4_callback_timer_on = 1;
	nfs_interval_timer_start(nfs4_callback_timer_call,
	    NFS4_CB_TIMER_PERIOD * 1000);
	lck_mtx_unlock(&nfs_global_mutex);
/*
 * Accept a new callback socket.
 *
 * Socket upcall on a listening callback socket: accepts the connection,
 * configures it, records it on nfs4_cb_socks, checks whether the peer is a
 * server we actually have mounted, and (re)arms the cleanup timer.
 *
 * NOTE(review): several original lines are missing from this chunk (braces,
 * the ip=4/ip=6 assignments, error-check wrappers around the logs, timeo
 * setup, and microtime for 'now'); visible tokens preserved exactly as found.
 */
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* which listener fired determines the address family */
	if (so == nfs4_cb_so) {
	} else if (so == nfs4_cb_so6) {
	/* allocate/initialize a new nfs_callback_socket */
	MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
	log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
	bzero(ncbsp, sizeof(*ncbsp));
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket */
	error = sock_accept(so, (struct sockaddr *)&ncbsp->ncbs_saddr,
	    ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
	    nfs4_cb_rcv, ncbsp, &newso);
	log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
	FREE(ncbsp, M_TEMP);

	/* set up the new socket */
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(&nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr) {
		if (nfs_sockaddr_cmp((struct sockaddr *)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
	if (!nmp) { /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
		    !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		/* unwanted socket: reschedule the timer to fire soon (500ms) */
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	lck_mtx_unlock(&nfs_global_mutex);
/*
 * Receive mbufs from callback sockets into RPC records and process each record.
 * Detect connection has been closed and shut down.
 *
 * Serializes upcalls per socket via NCBSOCK_UPCALL/NCBSOCK_UPCALLWANT under
 * nfs_global_mutex, then reads complete RPC records and dispatches each to
 * nfs4_cb_handler().
 *
 * NOTE(review): some lines (braces, declaration of 'm', microtime for 'now')
 * are missing from this chunk; visible tokens preserved exactly as found.
 */
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int error = 0, recv = 1;

	lck_mtx_lock(&nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(&nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(&nfs_global_mutex);
/*
 * Handle an NFS callback channel request.
 *
 * Parses the RPC call header from 'mreq', dispatches CB_NULL or a
 * CB_COMPOUND containing CB_GETATTR / CB_RECALL / CB_ILLEGAL ops against the
 * mount matched by callback ID + peer address, then builds and sends the
 * RPC reply on the callback socket.
 *
 * NOTE(review): a large number of original lines are missing from this
 * chunk (braces, switch statements and their defaults, "if (error)" /
 * "if (!error)" wrappers, wakeup/goto/label lines, declarations of 'fh',
 * 'np', 'msg', 'sentlen', and initializations of 'numres'/'replen');
 * visible code tokens are preserved exactly as found.
 */
nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
	socket_t so = ncbsp->ncbs_so;
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mhead = NULL, mrest = NULL, m;
	struct nfsmount *nmp;
	nfs_stateid stateid;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
	uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
	uint32_t auth_type, auth_len;
	uint32_t numres, *pnumres;
	int error = 0, replen, len;

	xid = numops = op = status = procnum = taglen = cbid = 0;
	fh = zalloc(nfs_fhandle_zone);

	/* ---- parse the RPC call header ---- */
	nfsm_chain_dissect_init(error, &nmreq, mreq);
	nfsm_chain_get_32(error, &nmreq, xid);          // RPC XID
	nfsm_chain_get_32(error, &nmreq, val);          // RPC Call
	nfsm_assert(error, (val == RPC_CALL), EBADRPC);
	nfsm_chain_get_32(error, &nmreq, val);          // RPC Version
	nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
	nfsm_chain_get_32(error, &nmreq, val);          // RPC Program Number
	nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
	nfsm_chain_get_32(error, &nmreq, val);          // NFS Callback Program Version Number
	nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
	nfsm_chain_get_32(error, &nmreq, procnum);      // NFS Callback Procedure Number
	nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);

	/* Handle authentication */
	/* XXX just ignore auth for now - handling kerberos may be tricky */
	nfsm_chain_get_32(error, &nmreq, auth_type);    // RPC Auth Flavor
	nfsm_chain_get_32(error, &nmreq, auth_len);     // RPC Auth Length
	nfsm_assert(error, (au_len <= RPCAUTH_MAXSIZ), EBADRPC);
	if (!error && (auth_len > 0)) {
		nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
	nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED);   // verifier flavor (should be AUTH_NONE)
	nfsm_chain_get_32(error, &nmreq, auth_len);     // verifier length
	nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
	if (!error && (auth_len > 0)) {
		nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));

	/* ---- dispatch on procedure ---- */
	case NFSPROC4_CB_NULL:
		status = NFSERR_RETVOID;
	case NFSPROC4_CB_COMPOUND:
		/* tag, minorversion, cb ident, numops, op array */
		nfsm_chain_get_32(error, &nmreq, taglen);       /* tag length */
		nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);

		/* start building the body of the response */
		nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
		nfsm_chain_init(&nmrep, mrest);

		/* copy tag from request to response */
		nfsm_chain_add_32(error, &nmrep, taglen);       /* tag length */
		for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
			nfsm_chain_get_32(error, &nmreq, val);
			nfsm_chain_add_32(error, &nmrep, val);

		/* insert number of results placeholder */
		nfsm_chain_add_32(error, &nmrep, numres);
		/* remember where to patch in the final count later */
		pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);

		nfsm_chain_get_32(error, &nmreq, val);          /* minorversion */
		nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
		nfsm_chain_get_32(error, &nmreq, cbid);         /* callback ID */
		nfsm_chain_get_32(error, &nmreq, numops);       /* number of operations */
		if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
		} else if ((error == ENOBUFS) || (error == ENOMEM)) {
			status = NFSERR_RESOURCE;
			status = NFSERR_SERVERFAULT;
		nfsm_chain_null(&nmrep);

		/* match the callback ID to a registered mount */
		lck_mtx_lock(&nfs_global_mutex);
		TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
			if (nmp->nm_cbid != cbid) {
			/* verify socket's source address matches this mount's server address */
			if (!nmp->nm_saddr) {
			if (nfs_sockaddr_cmp((struct sockaddr *)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
		/* mark the NFS mount as busy */
		lck_mtx_unlock(&nfs_global_mutex);
		/* if no mount match, just drop socket. */
		nfsm_chain_null(&nmrep);

		/* process ops, adding results to mrest */
		while (numops > 0) {
			nfsm_chain_get_32(error, &nmreq, op);
			case NFS_OP_CB_GETATTR:
				// (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
				nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
				bmlen = NFS_ATTR_BITMAP_LEN;
				nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
				numops = 0; /* don't process any more ops */
				/* find the node for the file handle */
				error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
				status = NFSERR_BADHANDLE;
				numops = 0; /* don't process any more ops */
				nfsm_chain_add_32(error, &nmrep, op);
				nfsm_chain_add_32(error, &nmrep, status);
				if (!error && (status == EBADRPC)) {
				/* only allow returning size, change, and mtime attrs */
				NFS_CLEAR_ATTRIBUTES(&rbitmap);
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
					NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
					attrbytes += 2 * NFSX_UNSIGNED;
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
					NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
					attrbytes += 2 * NFSX_UNSIGNED;
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
					NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
					attrbytes += 3 * NFSX_UNSIGNED;
				nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
				nfsm_chain_add_32(error, &nmrep, attrbytes);
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
					/* bump change attr if locally modified data hasn't been flushed */
					nfsm_chain_add_64(error, &nmrep,
					    np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
					nfsm_chain_add_64(error, &nmrep, np->n_size);
				if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
					nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
					nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
				nfs_node_unlock(np);
				vnode_put(NFSTOV(np));
				/*
				 * If we hit an error building the reply, we can't easily back up.
				 * So we'll just update the status and hope the server ignores the
				 */
			case NFS_OP_CB_RECALL:
				// (STATEID, TRUNCATE, FH) -> (STATUS)
				nfsm_chain_get_stateid(error, &nmreq, &stateid);
				nfsm_chain_get_32(error, &nmreq, truncate);
				nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
				numops = 0; /* don't process any more ops */
				/* find the node for the file handle */
				error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
				status = NFSERR_BADHANDLE;
				numops = 0; /* don't process any more ops */
				} else if (!(np->n_openflags & N_DELEG_MASK) ||
				    bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
					/* delegation stateid state doesn't match */
					status = NFSERR_BAD_STATEID;
					numops = 0; /* don't process any more ops */
				if (!status) { /* add node to recall queue, and wake socket thread */
					nfs4_delegation_return_enqueue(np);
				nfs_node_unlock(np);
				vnode_put(NFSTOV(np));
				nfsm_chain_add_32(error, &nmrep, op);
				nfsm_chain_add_32(error, &nmrep, status);
				if (!error && (status == EBADRPC)) {
			case NFS_OP_CB_ILLEGAL:
				nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
				status = NFSERR_OP_ILLEGAL;
				nfsm_chain_add_32(error, &nmrep, status);
				numops = 0; /* don't process any more ops */

		/* map a build error (with no op status yet) to an NFS status */
		if (!status && error) {
			if (error == EBADRPC) {
			} else if ((error == ENOBUFS) || (error == ENOMEM)) {
				status = NFSERR_RESOURCE;
				status = NFSERR_SERVERFAULT;

		/* Now, set the numres field */
		*pnumres = txdr_unsigned(numres);
		nfsm_chain_build_done(error, &nmrep);
		nfsm_chain_null(&nmrep);

		/* drop the callback reference on the mount */
		lck_mtx_lock(&nfs_global_mutex);
		if (!nmp->nm_cbid) {
			/* callback shutdown may be waiting for the ref count to drain */
			wakeup(&nmp->nm_cbrefs);
		lck_mtx_unlock(&nfs_global_mutex);

	if (status == EBADRPC) {
		OSAddAtomic64(1, &nfsstats.rpcinvalid);

	/* build reply header */
	error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
	nfsm_chain_init(&nmrep, mhead);
	nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
	nfsm_chain_add_32(error, &nmrep, xid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (status & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		/* XXX RPCAUTH_NULL verifier */
		nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
		nfsm_chain_add_32(error, &nmrep, 0);
		/* accepted status */
		nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
		nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
		nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
		nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
		nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
		nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
		nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
		if (status != NFSERR_RETVOID) {
			nfsm_chain_add_32(error, &nmrep, status);
	nfsm_chain_build_done(error, &nmrep);
	nfsm_chain_null(&nmrep);
	/* append the compound results to the reply header */
	error = mbuf_setnext(nmrep.nmc_mcur, mrest);
	printf("nfs cb: mbuf_setnext failed %d\n", error);

	/* Calculate the size of the reply */
	for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
		replen += mbuf_len(m);
	mbuf_pkthdr_setlen(mhead, replen);
	error = mbuf_pkthdr_setrcvif(mhead, NULL);
	/* record mark: length minus the mark word itself, high bit = last fragment */
	nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
	nfsm_chain_null(&nmrep);

	/* send the reply */
	bzero(&msg, sizeof(msg));
	error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
	if (!error && ((int)sentlen != replen)) {
		error = EWOULDBLOCK;
	if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
	nfsm_chain_cleanup(&nmrep);
	NFS_ZFREE(nfs_fhandle_zone, fh);
3213 #endif /* CONFIG_NFS4 */
3216 * Initialize an nfs_rpc_record_state structure.
3219 nfs_rpc_record_state_init(struct nfs_rpc_record_state
*nrrsp
)
3221 bzero(nrrsp
, sizeof(*nrrsp
));
3222 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3226 * Clean up an nfs_rpc_record_state structure.
3229 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state
*nrrsp
)
3231 if (nrrsp
->nrrs_m
) {
3232 mbuf_freem(nrrsp
->nrrs_m
);
3233 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3238 * Read the next (marked) RPC record from the socket.
3240 * *recvp returns if any data was received.
3241 * *mp returns the next complete RPC record
3244 nfs_rpc_record_read(socket_t so
, struct nfs_rpc_record_state
*nrrsp
, int flags
, int *recvp
, mbuf_t
*mp
)
3255 /* read the TCP RPC record marker */
3256 while (!error
&& nrrsp
->nrrs_markerleft
) {
3257 aio
.iov_base
= ((char*)&nrrsp
->nrrs_fragleft
+
3258 sizeof(nrrsp
->nrrs_fragleft
) - nrrsp
->nrrs_markerleft
);
3259 aio
.iov_len
= nrrsp
->nrrs_markerleft
;
3260 bzero(&msg
, sizeof(msg
));
3263 error
= sock_receive(so
, &msg
, flags
, &rcvlen
);
3264 if (error
|| !rcvlen
) {
3268 nrrsp
->nrrs_markerleft
-= rcvlen
;
3269 if (nrrsp
->nrrs_markerleft
) {
3272 /* record marker complete */
3273 nrrsp
->nrrs_fragleft
= ntohl(nrrsp
->nrrs_fragleft
);
3274 if (nrrsp
->nrrs_fragleft
& 0x80000000) {
3275 nrrsp
->nrrs_lastfrag
= 1;
3276 nrrsp
->nrrs_fragleft
&= ~0x80000000;
3278 nrrsp
->nrrs_reclen
+= nrrsp
->nrrs_fragleft
;
3279 if (nrrsp
->nrrs_reclen
> NFS_MAXPACKET
) {
3280 /* This is SERIOUS! We are out of sync with the sender. */
3281 log(LOG_ERR
, "impossible RPC record length (%d) on callback", nrrsp
->nrrs_reclen
);
3286 /* read the TCP RPC record fragment */
3287 while (!error
&& !nrrsp
->nrrs_markerleft
&& nrrsp
->nrrs_fragleft
) {
3289 rcvlen
= nrrsp
->nrrs_fragleft
;
3290 error
= sock_receivembuf(so
, NULL
, &m
, flags
, &rcvlen
);
3291 if (error
|| !rcvlen
|| !m
) {
3295 /* append mbufs to list */
3296 nrrsp
->nrrs_fragleft
-= rcvlen
;
3297 if (!nrrsp
->nrrs_m
) {
3300 error
= mbuf_setnext(nrrsp
->nrrs_mlast
, m
);
3302 printf("nfs tcp rcv: mbuf_setnext failed %d\n", error
);
3307 while (mbuf_next(m
)) {
3310 nrrsp
->nrrs_mlast
= m
;
3313 /* done reading fragment? */
3314 if (!error
&& !nrrsp
->nrrs_markerleft
&& !nrrsp
->nrrs_fragleft
) {
3315 /* reset socket fragment parsing state */
3316 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3317 if (nrrsp
->nrrs_lastfrag
) {
3318 /* RPC record complete */
3319 *mp
= nrrsp
->nrrs_m
;
3320 /* reset socket record parsing state */
3321 nrrsp
->nrrs_reclen
= 0;
3322 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3323 nrrsp
->nrrs_lastfrag
= 0;
3333 * The NFS client send routine.
3335 * Send the given NFS request out the mount's socket.
3336 * Holds nfs_sndlock() for the duration of this call.
3338 * - check for request termination (sigintr)
3339 * - wait for reconnect, if necessary
3340 * - UDP: check the congestion window
3341 * - make a copy of the request to send
3342 * - UDP: update the congestion window
3343 * - send the request
3345 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3346 * rexmit count is also updated if this isn't the first send.
3348 * If the send is not successful, make sure R_MUSTRESEND is set.
3349 * If this wasn't the first transmit, set R_RESENDERR.
3350 * Also, undo any UDP congestion window changes made.
3352 * If the error appears to indicate that the socket should
3353 * be reconnected, mark the socket for reconnection.
3355 * Only return errors when the request should be aborted.
3358 nfs_send(struct nfsreq
*req
, int wait
)
3360 struct nfsmount
*nmp
;
3361 struct nfs_socket
*nso
;
3362 int error
, error2
, sotype
, rexmit
, slpflag
= 0, needrecon
;
3364 struct sockaddr
*sendnam
;
3367 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3370 error
= nfs_sndlock(req
);
3372 lck_mtx_lock(&req
->r_mtx
);
3373 req
->r_error
= error
;
3374 req
->r_flags
&= ~R_SENDING
;
3375 lck_mtx_unlock(&req
->r_mtx
);
3379 error
= nfs_sigintr(req
->r_nmp
, req
, NULL
, 0);
3382 lck_mtx_lock(&req
->r_mtx
);
3383 req
->r_error
= error
;
3384 req
->r_flags
&= ~R_SENDING
;
3385 lck_mtx_unlock(&req
->r_mtx
);
3389 sotype
= nmp
->nm_sotype
;
3392 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3393 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3395 if (((req
->r_flags
& R_SETUP
) && !(nmp
->nm_sockflags
& NMSOCK_SETUP
)) ||
3396 ((req
->r_flags
& R_RECOVER
) && !(nmp
->nm_sockflags
& NMSOCK_READY
))) {
3399 lck_mtx_lock(&req
->r_mtx
);
3400 req
->r_error
= error
;
3401 req
->r_flags
&= ~R_SENDING
;
3402 lck_mtx_unlock(&req
->r_mtx
);
3406 /* If the socket needs reconnection, do that now. */
3407 /* wait until socket is ready - unless this request is part of setup */
3408 lck_mtx_lock(&nmp
->nm_lock
);
3409 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) &&
3410 !((nmp
->nm_sockflags
& NMSOCK_SETUP
) && (req
->r_flags
& R_SETUP
))) {
3411 if (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) {
3414 lck_mtx_unlock(&nmp
->nm_lock
);
3417 lck_mtx_lock(&req
->r_mtx
);
3418 req
->r_flags
&= ~R_SENDING
;
3419 req
->r_flags
|= R_MUSTRESEND
;
3421 lck_mtx_unlock(&req
->r_mtx
);
3424 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req
->r_xid
);
3425 lck_mtx_lock(&req
->r_mtx
);
3426 req
->r_flags
&= ~R_MUSTRESEND
;
3428 lck_mtx_unlock(&req
->r_mtx
);
3429 lck_mtx_lock(&nmp
->nm_lock
);
3430 while (!(nmp
->nm_sockflags
& NMSOCK_READY
)) {
3431 /* don't bother waiting if the socket thread won't be reconnecting it */
3432 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
3436 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (nmp
->nm_reconnect_start
> 0)) {
3439 if ((now
.tv_sec
- nmp
->nm_reconnect_start
) >= 8) {
3440 /* soft mount in reconnect for a while... terminate ASAP */
3441 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
3442 req
->r_flags
|= R_SOFTTERM
;
3443 req
->r_error
= error
= ETIMEDOUT
;
3447 /* make sure socket thread is running, then wait */
3448 nfs_mount_sock_thread_wake(nmp
);
3449 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3452 msleep(req
, &nmp
->nm_lock
, slpflag
| PSOCK
, "nfsconnectwait", &ts
);
3455 lck_mtx_unlock(&nmp
->nm_lock
);
3457 lck_mtx_lock(&req
->r_mtx
);
3458 req
->r_error
= error
;
3459 req
->r_flags
&= ~R_SENDING
;
3460 lck_mtx_unlock(&req
->r_mtx
);
3466 /* note that we're using the mount's socket to do the send */
3467 nmp
->nm_state
|= NFSSTA_SENDING
; /* will be cleared by nfs_sndunlock() */
3468 lck_mtx_unlock(&nmp
->nm_lock
);
3471 lck_mtx_lock(&req
->r_mtx
);
3472 req
->r_flags
&= ~R_SENDING
;
3473 req
->r_flags
|= R_MUSTRESEND
;
3475 lck_mtx_unlock(&req
->r_mtx
);
3479 lck_mtx_lock(&req
->r_mtx
);
3480 rexmit
= (req
->r_flags
& R_SENT
);
3482 if (sotype
== SOCK_DGRAM
) {
3483 lck_mtx_lock(&nmp
->nm_lock
);
3484 if (!(req
->r_flags
& R_CWND
) && (nmp
->nm_sent
>= nmp
->nm_cwnd
)) {
3485 /* if we can't send this out yet, wait on the cwnd queue */
3486 slpflag
= (NMFLAG(nmp
, INTR
) && req
->r_thread
) ? PCATCH
: 0;
3487 lck_mtx_unlock(&nmp
->nm_lock
);
3489 req
->r_flags
&= ~R_SENDING
;
3490 req
->r_flags
|= R_MUSTRESEND
;
3491 lck_mtx_unlock(&req
->r_mtx
);
3496 lck_mtx_lock(&nmp
->nm_lock
);
3497 while (nmp
->nm_sent
>= nmp
->nm_cwnd
) {
3498 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3501 TAILQ_INSERT_TAIL(&nmp
->nm_cwndq
, req
, r_cchain
);
3502 msleep(req
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfswaitcwnd", &ts
);
3504 if ((req
->r_cchain
.tqe_next
!= NFSREQNOLIST
)) {
3505 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3506 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3509 lck_mtx_unlock(&nmp
->nm_lock
);
3513 * We update these *before* the send to avoid racing
3514 * against others who may be looking to send requests.
3517 /* first transmit */
3518 req
->r_flags
|= R_CWND
;
3519 nmp
->nm_sent
+= NFS_CWNDSCALE
;
3522 * When retransmitting, turn timing off
3523 * and divide congestion window by 2.
3525 req
->r_flags
&= ~R_TIMING
;
3527 if (nmp
->nm_cwnd
< NFS_CWNDSCALE
) {
3528 nmp
->nm_cwnd
= NFS_CWNDSCALE
;
3531 lck_mtx_unlock(&nmp
->nm_lock
);
3534 req
->r_flags
&= ~R_MUSTRESEND
;
3535 lck_mtx_unlock(&req
->r_mtx
);
3537 error
= mbuf_copym(req
->r_mhead
, 0, MBUF_COPYALL
,
3538 wait
? MBUF_WAITOK
: MBUF_DONTWAIT
, &mreqcopy
);
3541 log(LOG_INFO
, "nfs_send: mbuf copy failed %d\n", error
);
3544 lck_mtx_lock(&req
->r_mtx
);
3545 req
->r_flags
&= ~R_SENDING
;
3546 req
->r_flags
|= R_MUSTRESEND
;
3548 lck_mtx_unlock(&req
->r_mtx
);
3552 bzero(&msg
, sizeof(msg
));
3553 if ((sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
) && ((sendnam
= nmp
->nm_saddr
))) {
3554 msg
.msg_name
= (caddr_t
)sendnam
;
3555 msg
.msg_namelen
= sendnam
->sa_len
;
3557 NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy
);
3558 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreqcopy
, 0, &sentlen
);
3559 if (error
|| (sentlen
!= req
->r_mreqlen
)) {
3560 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
3561 req
->r_xid
, (int)sentlen
, (int)req
->r_mreqlen
, error
);
3564 if (!error
&& (sentlen
!= req
->r_mreqlen
)) {
3565 error
= EWOULDBLOCK
;
3567 needrecon
= ((sotype
== SOCK_STREAM
) && sentlen
&& (sentlen
!= req
->r_mreqlen
));
3569 lck_mtx_lock(&req
->r_mtx
);
3570 req
->r_flags
&= ~R_SENDING
;
3572 if (rexmit
&& (++req
->r_rexmit
> NFS_MAXREXMIT
)) {
3573 req
->r_rexmit
= NFS_MAXREXMIT
;
3578 req
->r_flags
&= ~R_RESENDERR
;
3580 OSAddAtomic64(1, &nfsstats
.rpcretries
);
3582 req
->r_flags
|= R_SENT
;
3583 if (req
->r_flags
& R_WAITSENT
) {
3584 req
->r_flags
&= ~R_WAITSENT
;
3588 lck_mtx_unlock(&req
->r_mtx
);
3593 req
->r_flags
|= R_MUSTRESEND
;
3595 req
->r_flags
|= R_RESENDERR
;
3597 if ((error
== EINTR
) || (error
== ERESTART
)) {
3598 req
->r_error
= error
;
3600 lck_mtx_unlock(&req
->r_mtx
);
3602 if (sotype
== SOCK_DGRAM
) {
3604 * Note: even though a first send may fail, we consider
3605 * the request sent for congestion window purposes.
3606 * So we don't need to undo any of the changes made above.
3609 * Socket errors ignored for connectionless sockets??
3610 * For now, ignore them all
3612 if ((error
!= EINTR
) && (error
!= ERESTART
) &&
3613 (error
!= EWOULDBLOCK
) && (error
!= EIO
) && (nso
== nmp
->nm_nso
)) {
3614 int clearerror
= 0, optlen
= sizeof(clearerror
);
3615 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &clearerror
, &optlen
);
3616 #ifdef NFS_SOCKET_DEBUGGING
3618 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
3625 /* check if it appears we should reconnect the socket */
3628 /* if send timed out, reconnect if on TCP */
3629 if (sotype
!= SOCK_STREAM
) {
3645 /* case ECANCELED??? */
3649 if (needrecon
&& (nso
== nmp
->nm_nso
)) { /* mark socket as needing reconnect */
3650 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req
->r_xid
, error
);
3651 nfs_need_reconnect(nmp
);
3656 if (nfs_is_dead(error
, nmp
)) {
3661 * Don't log some errors:
3662 * EPIPE errors may be common with servers that drop idle connections.
3663 * EADDRNOTAVAIL may occur on network transitions.
3664 * ENOTCONN may occur under some network conditions.
3666 if ((error
== EPIPE
) || (error
== EADDRNOTAVAIL
) || (error
== ENOTCONN
)) {
3669 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
3670 log(LOG_INFO
, "nfs send error %d for server %s\n", error
,
3671 !req
->r_nmp
? "<unmounted>" :
3672 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
);
3675 /* prefer request termination error over other errors */
3676 error2
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0);
3681 /* only allow the following errors to be returned */
3682 if ((error
!= EINTR
) && (error
!= ERESTART
) && (error
!= EIO
) &&
3683 (error
!= ENXIO
) && (error
!= ETIMEDOUT
)) {
3685 * We got some error we don't know what do do with,
3686 * i.e., we're not reconnecting, we map it to
3687 * EIO. Presumably our send failed and we better tell
3688 * the caller so they don't wait for a reply that is
3689 * never going to come. If we are reconnecting we
3690 * return 0 and the request will be resent.
3692 error
= needrecon
? 0 : EIO
;
3698 * NFS client socket upcalls
3700 * Pull RPC replies out of an NFS mount's socket and match them
3701 * up with the pending request.
3703 * The datagram code is simple because we always get whole
3704 * messages out of the socket.
3706 * The stream code is more involved because we have to parse
3707 * the RPC records out of the stream.
3710 /* NFS client UDP socket upcall */
3712 nfs_udp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3714 struct nfsmount
*nmp
= arg
;
3715 struct nfs_socket
*nso
= nmp
->nm_nso
;
3720 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3725 /* make sure we're on the current socket */
3726 if (!nso
|| (nso
->nso_so
!= so
)) {
3732 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
3734 nfs_request_match_reply(nmp
, m
);
3736 } while (m
&& !error
);
3738 if (error
&& (error
!= EWOULDBLOCK
)) {
3739 /* problems with the socket... mark for reconnection */
3740 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error
);
3741 nfs_need_reconnect(nmp
);
3745 /* NFS client TCP socket upcall */
3747 nfs_tcp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3749 struct nfsmount
*nmp
= arg
;
3750 struct nfs_socket
*nso
= nmp
->nm_nso
;
3751 struct nfs_rpc_record_state nrrs
;
3757 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3761 /* make sure we're on the current socket */
3762 lck_mtx_lock(&nmp
->nm_lock
);
3764 if (!nso
|| (nso
->nso_so
!= so
) || (nmp
->nm_sockflags
& (NMSOCK_DISCONNECTING
))) {
3765 lck_mtx_unlock(&nmp
->nm_lock
);
3768 lck_mtx_unlock(&nmp
->nm_lock
);
3770 /* make sure this upcall should be trying to do work */
3771 lck_mtx_lock(&nso
->nso_lock
);
3772 if (nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) {
3773 lck_mtx_unlock(&nso
->nso_lock
);
3776 nso
->nso_flags
|= NSO_UPCALL
;
3777 nrrs
= nso
->nso_rrs
;
3778 lck_mtx_unlock(&nso
->nso_lock
);
3780 /* loop while we make error-free progress */
3781 while (!error
&& recv
) {
3782 error
= nfs_rpc_record_read(so
, &nrrs
, MSG_DONTWAIT
, &recv
, &m
);
3783 if (m
) { /* match completed response with request */
3784 nfs_request_match_reply(nmp
, m
);
3788 /* Update the sockets's rpc parsing state */
3789 lck_mtx_lock(&nso
->nso_lock
);
3790 nso
->nso_rrs
= nrrs
;
3791 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
3794 nso
->nso_flags
&= ~NSO_UPCALL
;
3795 lck_mtx_unlock(&nso
->nso_lock
);
3797 wakeup(&nso
->nso_flags
);
3800 #ifdef NFS_SOCKET_DEBUGGING
3801 if (!recv
&& (error
!= EWOULDBLOCK
)) {
3802 NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error
);
3805 /* note: no error and no data indicates server closed its end */
3806 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
3807 /* problems with the socket... mark for reconnection */
3808 NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error
);
3809 nfs_need_reconnect(nmp
);
3814 * "poke" a socket to try to provoke any pending errors
3817 nfs_sock_poke(struct nfsmount
*nmp
)
3825 lck_mtx_lock(&nmp
->nm_lock
);
3826 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) ||
3827 !(nmp
->nm_sockflags
& NMSOCK_READY
) || !nmp
->nm_nso
|| !nmp
->nm_nso
->nso_so
) {
3828 /* Nothing to poke */
3829 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3830 wakeup(&nmp
->nm_sockflags
);
3831 lck_mtx_unlock(&nmp
->nm_lock
);
3834 lck_mtx_unlock(&nmp
->nm_lock
);
3835 aio
.iov_base
= &dummy
;
3838 bzero(&msg
, sizeof(msg
));
3841 error
= sock_send(nmp
->nm_nso
->nso_so
, &msg
, MSG_DONTWAIT
, &len
);
3842 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error
);
3843 lck_mtx_lock(&nmp
->nm_lock
);
3844 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3845 wakeup(&nmp
->nm_sockflags
);
3846 lck_mtx_unlock(&nmp
->nm_lock
);
3847 nfs_is_dead(error
, nmp
);
3851 * Match an RPC reply with the corresponding request
3854 nfs_request_match_reply(struct nfsmount
*nmp
, mbuf_t mrep
)
3857 struct nfsm_chain nmrep
;
3858 u_int32_t reply
= 0, rxid
= 0;
3859 int error
= 0, asyncioq
, t1
;
3861 bzero(&nmrep
, sizeof(nmrep
));
3862 /* Get the xid and check that it is an rpc reply */
3863 nfsm_chain_dissect_init(error
, &nmrep
, mrep
);
3864 nfsm_chain_get_32(error
, &nmrep
, rxid
);
3865 nfsm_chain_get_32(error
, &nmrep
, reply
);
3866 if (error
|| (reply
!= RPC_REPLY
)) {
3867 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
3873 * Loop through the request list to match up the reply
3874 * Iff no match, just drop it.
3876 lck_mtx_lock(&nfs_request_mutex
);
3877 TAILQ_FOREACH(req
, &nfs_reqq
, r_chain
) {
3878 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3881 /* looks like we have it, grab lock and double check */
3882 lck_mtx_lock(&req
->r_mtx
);
3883 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3884 lck_mtx_unlock(&req
->r_mtx
);
3888 req
->r_nmrep
= nmrep
;
3889 lck_mtx_lock(&nmp
->nm_lock
);
3890 if (nmp
->nm_sotype
== SOCK_DGRAM
) {
3892 * Update congestion window.
3893 * Do the additive increase of one rpc/rtt.
3895 FSDBG(530, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3896 if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
3898 ((NFS_CWNDSCALE
* NFS_CWNDSCALE
) +
3899 (nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
3900 if (nmp
->nm_cwnd
> NFS_MAXCWND
) {
3901 nmp
->nm_cwnd
= NFS_MAXCWND
;
3904 if (req
->r_flags
& R_CWND
) {
3905 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3906 req
->r_flags
&= ~R_CWND
;
3908 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3909 /* congestion window is open, poke the cwnd queue */
3910 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3911 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3912 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3917 * Update rtt using a gain of 0.125 on the mean
3918 * and a gain of 0.25 on the deviation.
3920 if (req
->r_flags
& R_TIMING
) {
3922 * Since the timer resolution of
3923 * NFS_HZ is so course, it can often
3924 * result in r_rtt == 0. Since
3925 * r_rtt == N means that the actual
3926 * rtt is between N+dt and N+2-dt ticks,
3929 if (proct
[req
->r_procnum
] == 0) {
3930 panic("nfs_request_match_reply: proct[%d] is zero", req
->r_procnum
);
3932 t1
= req
->r_rtt
+ 1;
3933 t1
-= (NFS_SRTT(req
) >> 3);
3934 NFS_SRTT(req
) += t1
;
3938 t1
-= (NFS_SDRTT(req
) >> 2);
3939 NFS_SDRTT(req
) += t1
;
3941 nmp
->nm_timeouts
= 0;
3942 lck_mtx_unlock(&nmp
->nm_lock
);
3943 /* signal anyone waiting on this request */
3945 asyncioq
= (req
->r_callback
.rcb_func
!= NULL
);
3947 if (nfs_request_using_gss(req
)) {
3948 nfs_gss_clnt_rpcdone(req
);
3950 #endif /* CONFIG_NFS_GSS */
3951 lck_mtx_unlock(&req
->r_mtx
);
3952 lck_mtx_unlock(&nfs_request_mutex
);
3953 /* if it's an async RPC with a callback, queue it up */
3955 nfs_asyncio_finish(req
);
3961 /* not matched to a request, so drop it. */
3962 lck_mtx_unlock(&nfs_request_mutex
);
3963 OSAddAtomic64(1, &nfsstats
.rpcunexpected
);
3969 * Wait for the reply for a given request...
3970 * ...potentially resending the request if necessary.
3973 nfs_wait_reply(struct nfsreq
*req
)
3975 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3976 int error
= 0, slpflag
, first
= 1;
3978 if (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
3984 lck_mtx_lock(&req
->r_mtx
);
3985 while (!req
->r_nmrep
.nmc_mhead
) {
3986 if ((error
= nfs_sigintr(req
->r_nmp
, req
, first
? NULL
: req
->r_thread
, 0))) {
3989 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
3992 /* check if we need to resend */
3993 if (req
->r_flags
& R_MUSTRESEND
) {
3994 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3995 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
3996 req
->r_flags
|= R_SENDING
;
3997 lck_mtx_unlock(&req
->r_mtx
);
3998 if (nfs_request_using_gss(req
)) {
4000 * It's an RPCSEC_GSS request.
4001 * Can't just resend the original request
4002 * without bumping the cred sequence number.
4003 * Go back and re-build the request.
4005 lck_mtx_lock(&req
->r_mtx
);
4006 req
->r_flags
&= ~R_SENDING
;
4007 lck_mtx_unlock(&req
->r_mtx
);
4010 error
= nfs_send(req
, 1);
4011 lck_mtx_lock(&req
->r_mtx
);
4012 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
4013 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
, error
);
4017 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
4021 /* need to poll if we're P_NOREMOTEHANG */
4022 if (nfs_noremotehang(req
->r_thread
)) {
4025 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitreply", &ts
);
4026 first
= slpflag
= 0;
4028 lck_mtx_unlock(&req
->r_mtx
);
4034 * An NFS request goes something like this:
4035 * (nb: always frees up mreq mbuf list)
4036 * nfs_request_create()
4037 * - allocates a request struct if one is not provided
4038 * - initial fill-in of the request struct
4039 * nfs_request_add_header()
4040 * - add the RPC header
4041 * nfs_request_send()
4042 * - link it into list
4043 * - call nfs_send() for first transmit
4044 * nfs_request_wait()
4045 * - call nfs_wait_reply() to wait for the reply
4046 * nfs_request_finish()
4047 * - break down rpc header and return with error or nfs reply
4048 * pointed to by nmrep.
4049 * nfs_request_rele()
4050 * nfs_request_destroy()
4051 * - clean up the request struct
4052 * - free the request struct if it was allocated by nfs_request_create()
4056 * Set up an NFS request struct (allocating if no request passed in).
4061 mount_t mp
, /* used only if !np */
4062 struct nfsm_chain
*nmrest
,
4066 struct nfsreq
**reqp
)
4068 struct nfsreq
*req
, *newreq
= NULL
;
4069 struct nfsmount
*nmp
;
4073 /* allocate a new NFS request structure */
4074 req
= newreq
= zalloc_flags(nfs_req_zone
, Z_WAITOK
| Z_ZERO
);
4076 bzero(req
, sizeof(*req
));
4078 if (req
== newreq
) {
4079 req
->r_flags
= R_ALLOCATED
;
4082 nmp
= VFSTONFS(np
? NFSTOMP(np
) : mp
);
4083 if (nfs_mount_gone(nmp
)) {
4085 NFS_ZFREE(nfs_req_zone
, newreq
);
4089 lck_mtx_lock(&nmp
->nm_lock
);
4090 if ((nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) &&
4091 (nmp
->nm_state
& NFSSTA_TIMEO
)) {
4092 lck_mtx_unlock(&nmp
->nm_lock
);
4093 mbuf_freem(nmrest
->nmc_mhead
);
4094 nmrest
->nmc_mhead
= NULL
;
4096 NFS_ZFREE(nfs_req_zone
, newreq
);
4101 if ((nmp
->nm_vers
!= NFS_VER4
) && (procnum
>= 0) && (procnum
< NFS_NPROCS
)) {
4102 OSAddAtomic64(1, &nfsstats
.rpccnt
[procnum
]);
4104 if ((nmp
->nm_vers
== NFS_VER4
) && (procnum
!= NFSPROC4_COMPOUND
) && (procnum
!= NFSPROC4_NULL
)) {
4105 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum
);
4108 lck_mtx_init(&req
->r_mtx
, &nfs_request_grp
, LCK_ATTR_NULL
);
4112 req
->r_thread
= thd
;
4114 req
->r_flags
|= R_NOINTR
;
4116 if (IS_VALID_CRED(cred
)) {
4117 kauth_cred_ref(cred
);
4120 req
->r_procnum
= procnum
;
4121 if (proct
[procnum
] > 0) {
4122 req
->r_flags
|= R_TIMING
;
4124 req
->r_nmrep
.nmc_mhead
= NULL
;
4125 SLIST_INIT(&req
->r_gss_seqlist
);
4126 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
4127 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4128 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4130 /* set auth flavor to use for request */
4132 req
->r_auth
= RPCAUTH_NONE
;
4133 } else if (req
->r_np
&& (req
->r_np
->n_auth
!= RPCAUTH_INVALID
)) {
4134 req
->r_auth
= req
->r_np
->n_auth
;
4136 req
->r_auth
= nmp
->nm_auth
;
4139 lck_mtx_unlock(&nmp
->nm_lock
);
4141 /* move the request mbuf chain to the nfsreq */
4142 req
->r_mrest
= nmrest
->nmc_mhead
;
4143 nmrest
->nmc_mhead
= NULL
;
4145 req
->r_flags
|= R_INITTED
;
4154 * Clean up and free an NFS request structure.
4157 nfs_request_destroy(struct nfsreq
*req
)
4159 struct nfsmount
*nmp
;
4160 int clearjbtimeo
= 0;
4163 struct gss_seq
*gsp
, *ngsp
;
4166 if (!req
|| !(req
->r_flags
& R_INITTED
)) {
4170 req
->r_flags
&= ~R_INITTED
;
4171 if (req
->r_lflags
& RL_QUEUED
) {
4172 nfs_reqdequeue(req
);
4175 if (req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
4177 * Still on an async I/O queue?
4178 * %%% But which one, we may be on a local iod.
4180 lck_mtx_lock(&nfsiod_mutex
);
4181 if (nmp
&& req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
4182 TAILQ_REMOVE(&nmp
->nm_iodq
, req
, r_achain
);
4183 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
4185 lck_mtx_unlock(&nfsiod_mutex
);
4188 lck_mtx_lock(&req
->r_mtx
);
4190 lck_mtx_lock(&nmp
->nm_lock
);
4191 if (req
->r_flags
& R_CWND
) {
4192 /* Decrement the outstanding request count. */
4193 req
->r_flags
&= ~R_CWND
;
4194 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4195 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4196 /* congestion window is open, poke the cwnd queue */
4197 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4198 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4199 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4203 /* XXX should we just remove this conditional, we should have a reference if we're resending */
4204 if ((req
->r_flags
& R_RESENDQ
) && req
->r_rchain
.tqe_next
!= NFSREQNOLIST
) {
4205 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4206 req
->r_flags
&= ~R_RESENDQ
;
4207 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4209 if (req
->r_cchain
.tqe_next
!= NFSREQNOLIST
) {
4210 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
4211 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4213 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4214 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4216 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4218 lck_mtx_unlock(&nmp
->nm_lock
);
4220 lck_mtx_unlock(&req
->r_mtx
);
4223 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4226 mbuf_freem(req
->r_mhead
);
4227 } else if (req
->r_mrest
) {
4228 mbuf_freem(req
->r_mrest
);
4230 if (req
->r_nmrep
.nmc_mhead
) {
4231 mbuf_freem(req
->r_nmrep
.nmc_mhead
);
4233 if (IS_VALID_CRED(req
->r_cred
)) {
4234 kauth_cred_unref(&req
->r_cred
);
4237 if (nfs_request_using_gss(req
)) {
4238 nfs_gss_clnt_rpcdone(req
);
4240 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
)
4242 if (req
->r_gss_ctx
) {
4243 nfs_gss_clnt_ctx_unref(req
);
4245 #endif /* CONFIG_NFS_GSS */
4246 if (req
->r_wrongsec
) {
4247 FREE(req
->r_wrongsec
, M_TEMP
);
4250 nfs_mount_rele(nmp
);
4252 lck_mtx_destroy(&req
->r_mtx
, &nfs_request_grp
);
4253 if (req
->r_flags
& R_ALLOCATED
) {
4254 NFS_ZFREE(nfs_req_zone
, req
);
4259 nfs_request_ref(struct nfsreq
*req
, int locked
)
4262 lck_mtx_lock(&req
->r_mtx
);
4264 if (req
->r_refs
<= 0) {
4265 panic("nfsreq reference error");
4269 lck_mtx_unlock(&req
->r_mtx
);
4274 nfs_request_rele(struct nfsreq
*req
)
4278 lck_mtx_lock(&req
->r_mtx
);
4279 if (req
->r_refs
<= 0) {
4280 panic("nfsreq reference underflow");
4283 destroy
= (req
->r_refs
== 0);
4284 lck_mtx_unlock(&req
->r_mtx
);
4286 nfs_request_destroy(req
);
4292 * Add an (updated) RPC header with authorization to an NFS request.
4295 nfs_request_add_header(struct nfsreq
*req
)
4297 struct nfsmount
*nmp
;
4301 /* free up any previous header */
4302 if ((m
= req
->r_mhead
)) {
4303 while (m
&& (m
!= req
->r_mrest
)) {
4306 req
->r_mhead
= NULL
;
4310 if (nfs_mount_gone(nmp
)) {
4314 error
= nfsm_rpchead(req
, req
->r_mrest
, &req
->r_xid
, &req
->r_mhead
);
4319 req
->r_mreqlen
= mbuf_pkthdr_len(req
->r_mhead
);
4321 if (nfs_mount_gone(nmp
)) {
4324 lck_mtx_lock(&nmp
->nm_lock
);
4325 if (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) {
4326 req
->r_retry
= nmp
->nm_retry
;
4328 req
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
4330 lck_mtx_unlock(&nmp
->nm_lock
);
4337 * Queue an NFS request up and send it out.
4340 nfs_request_send(struct nfsreq
*req
, int wait
)
4342 struct nfsmount
*nmp
;
4345 lck_mtx_lock(&req
->r_mtx
);
4346 req
->r_flags
|= R_SENDING
;
4347 lck_mtx_unlock(&req
->r_mtx
);
4349 lck_mtx_lock(&nfs_request_mutex
);
4352 if (nfs_mount_gone(nmp
)) {
4353 lck_mtx_unlock(&nfs_request_mutex
);
4358 if (!req
->r_start
) {
4359 req
->r_start
= now
.tv_sec
;
4360 req
->r_lastmsg
= now
.tv_sec
-
4361 ((nmp
->nm_tprintf_delay
) - (nmp
->nm_tprintf_initial_delay
));
4364 OSAddAtomic64(1, &nfsstats
.rpcrequests
);
4367 * Make sure the request is not in the queue.
4369 if (req
->r_lflags
& RL_QUEUED
) {
4371 panic("nfs_request_send: req %p is already in global requests queue", req
);
4373 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
4374 req
->r_lflags
&= ~RL_QUEUED
;
4375 #endif /* DEVELOPMENT */
4379 * Chain request into list of outstanding requests. Be sure
4380 * to put it LAST so timer finds oldest requests first.
4381 * Make sure that the request queue timer is running
4382 * to check for possible request timeout.
4384 TAILQ_INSERT_TAIL(&nfs_reqq
, req
, r_chain
);
4385 req
->r_lflags
|= RL_QUEUED
;
4386 if (!nfs_request_timer_on
) {
4387 nfs_request_timer_on
= 1;
4388 nfs_interval_timer_start(nfs_request_timer_call
,
4391 lck_mtx_unlock(&nfs_request_mutex
);
4393 /* Send the request... */
4394 return nfs_send(req
, wait
);
4398 * Call nfs_wait_reply() to wait for the reply.
4401 nfs_request_wait(struct nfsreq
*req
)
4403 req
->r_error
= nfs_wait_reply(req
);
4407 * Finish up an NFS request by dequeueing it and
4408 * doing the initial NFS request reply processing.
4413 struct nfsm_chain
*nmrepp
,
4416 struct nfsmount
*nmp
;
4419 uint32_t verf_len
= 0;
4420 uint32_t reply_status
= 0;
4421 uint32_t rejected_status
= 0;
4422 uint32_t auth_status
= 0;
4423 uint32_t accepted_status
= 0;
4424 struct nfsm_chain nmrep
;
4425 int error
, clearjbtimeo
;
4427 error
= req
->r_error
;
4430 nmrepp
->nmc_mhead
= NULL
;
4433 /* RPC done, unlink the request. */
4434 nfs_reqdequeue(req
);
4436 mrep
= req
->r_nmrep
.nmc_mhead
;
4440 if ((req
->r_flags
& R_CWND
) && nmp
) {
4442 * Decrement the outstanding request count.
4444 req
->r_flags
&= ~R_CWND
;
4445 lck_mtx_lock(&nmp
->nm_lock
);
4446 FSDBG(273, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4447 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4448 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4449 /* congestion window is open, poke the cwnd queue */
4450 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4451 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4452 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4455 lck_mtx_unlock(&nmp
->nm_lock
);
4459 if (nfs_request_using_gss(req
)) {
4461 * If the request used an RPCSEC_GSS credential
4462 * then reset its sequence number bit in the
4465 nfs_gss_clnt_rpcdone(req
);
4468 * If we need to re-send, go back and re-build the
4469 * request based on a new sequence number.
4470 * Note that we're using the original XID.
4472 if (error
== EAGAIN
) {
4477 error
= nfs_gss_clnt_args_restore(req
); // remove any trailer mbufs
4478 req
->r_nmrep
.nmc_mhead
= NULL
;
4479 req
->r_flags
|= R_RESTART
;
4480 if (error
== ENEEDAUTH
) {
4481 req
->r_xid
= 0; // get a new XID
4487 #endif /* CONFIG_NFS_GSS */
4490 * If there was a successful reply, make sure to mark the mount as up.
4491 * If a tprintf message was given (or if this is a timed-out soft mount)
4492 * then post a tprintf message indicating the server is alive again.
4495 if ((req
->r_flags
& R_TPRINTFMSG
) ||
4496 (nmp
&& (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) &&
4497 ((nmp
->nm_state
& (NFSSTA_TIMEO
| NFSSTA_FORCE
| NFSSTA_DEAD
)) == NFSSTA_TIMEO
))) {
4498 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, "is alive again");
4500 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, NULL
);
4503 if (!error
&& !nmp
) {
4509 * break down the RPC header and check if ok
4511 nmrep
= req
->r_nmrep
;
4512 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
4514 if (reply_status
== RPC_MSGDENIED
) {
4515 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
4517 if (rejected_status
== RPC_MISMATCH
) {
4521 nfsm_chain_get_32(error
, &nmrep
, auth_status
);
4523 switch (auth_status
) {
4525 case RPCSEC_GSS_CREDPROBLEM
:
4526 case RPCSEC_GSS_CTXPROBLEM
:
4528 * An RPCSEC_GSS cred or context problem.
4529 * We can't use it anymore.
4530 * Restore the args, renew the context
4531 * and set up for a resend.
4533 error
= nfs_gss_clnt_args_restore(req
);
4534 if (error
&& error
!= ENEEDAUTH
) {
4539 error
= nfs_gss_clnt_ctx_renew(req
);
4545 req
->r_nmrep
.nmc_mhead
= NULL
;
4546 req
->r_xid
= 0; // get a new XID
4547 req
->r_flags
|= R_RESTART
;
4549 #endif /* CONFIG_NFS_GSS */
4557 /* Now check the verifier */
4558 nfsm_chain_get_32(error
, &nmrep
, verf_type
); // verifier flavor
4559 nfsm_chain_get_32(error
, &nmrep
, verf_len
); // verifier length
4562 switch (req
->r_auth
) {
4565 /* Any AUTH_SYS verifier is ignored */
4567 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
4569 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
4575 error
= nfs_gss_clnt_verf_get(req
, &nmrep
,
4576 verf_type
, verf_len
, &accepted_status
);
4578 #endif /* CONFIG_NFS_GSS */
4582 switch (accepted_status
) {
4584 if (req
->r_procnum
== NFSPROC_NULL
) {
4586 * The NFS null procedure is unique,
4587 * in not returning an NFS status.
4591 nfsm_chain_get_32(error
, &nmrep
, *status
);
4595 if ((nmp
->nm_vers
!= NFS_VER2
) && (*status
== NFSERR_TRYLATER
)) {
4597 * It's a JUKEBOX error - delay and try again
4599 int delay
, slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4602 req
->r_nmrep
.nmc_mhead
= NULL
;
4603 if ((req
->r_delay
>= 30) && !(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4604 /* we're not yet completely mounted and */
4605 /* we can't complete an RPC, so we fail */
4606 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4608 error
= req
->r_error
;
4611 req
->r_delay
= !req
->r_delay
? NFS_TRYLATERDEL
: (req
->r_delay
* 2);
4612 if (req
->r_delay
> 30) {
4615 if (nmp
->nm_tprintf_initial_delay
&& (req
->r_delay
>= nmp
->nm_tprintf_initial_delay
)) {
4616 if (!(req
->r_flags
& R_JBTPRINTFMSG
)) {
4617 req
->r_flags
|= R_JBTPRINTFMSG
;
4618 lck_mtx_lock(&nmp
->nm_lock
);
4620 lck_mtx_unlock(&nmp
->nm_lock
);
4622 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_JUKEBOXTIMEO
,
4623 "resource temporarily unavailable (jukebox)", 0);
4625 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (req
->r_delay
== 30) &&
4626 !(req
->r_flags
& R_NOINTR
)) {
4627 /* for soft mounts, just give up after a short while */
4628 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4630 error
= req
->r_error
;
4633 delay
= req
->r_delay
;
4634 if (req
->r_callback
.rcb_func
) {
4637 req
->r_resendtime
= now
.tv_sec
+ delay
;
4640 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
4643 tsleep(nfs_request_finish
, PSOCK
| slpflag
, "nfs_jukebox_trylater", hz
);
4645 } while (--delay
> 0);
4647 req
->r_xid
= 0; // get a new XID
4648 req
->r_flags
|= R_RESTART
;
4650 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_TRYLATER
);
4654 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4655 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4656 lck_mtx_lock(&nmp
->nm_lock
);
4658 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4659 lck_mtx_unlock(&nmp
->nm_lock
);
4660 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, "resource available again");
4664 if ((nmp
->nm_vers
>= NFS_VER4
) && (*status
== NFSERR_WRONGSEC
)) {
4666 * Hmmm... we need to try a different security flavor.
4667 * The first time a request hits this, we will allocate an array
4668 * to track flavors to try. We fill the array with the mount's
4669 * preferred flavors or the server's preferred flavors or just the
4670 * flavors we support.
4672 uint32_t srvflavors
[NX_MAX_SEC_FLAVORS
];
4675 /* Call SECINFO to try to get list of flavors from server. */
4676 srvcount
= NX_MAX_SEC_FLAVORS
;
4677 nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, req
->r_cred
, srvflavors
, &srvcount
);
4679 if (!req
->r_wrongsec
) {
4680 /* first time... set up flavor array */
4681 MALLOC(req
->r_wrongsec
, uint32_t*, NX_MAX_SEC_FLAVORS
* sizeof(uint32_t), M_TEMP
, M_WAITOK
);
4682 if (!req
->r_wrongsec
) {
4687 if (nmp
->nm_sec
.count
) { /* use the mount's preferred list of flavors */
4688 for (; i
< nmp
->nm_sec
.count
; i
++) {
4689 req
->r_wrongsec
[i
] = nmp
->nm_sec
.flavors
[i
];
4691 } else if (srvcount
) { /* otherwise use the server's list of flavors */
4692 for (; i
< srvcount
; i
++) {
4693 req
->r_wrongsec
[i
] = srvflavors
[i
];
4695 } else { /* otherwise, just try the flavors we support. */
4696 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5P
;
4697 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5I
;
4698 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5
;
4699 req
->r_wrongsec
[i
++] = RPCAUTH_SYS
;
4700 req
->r_wrongsec
[i
++] = RPCAUTH_NONE
;
4702 for (; i
< NX_MAX_SEC_FLAVORS
; i
++) { /* invalidate any remaining slots */
4703 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4707 /* clear the current flavor from the list */
4708 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4709 if (req
->r_wrongsec
[i
] == req
->r_auth
) {
4710 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4714 /* find the next flavor to try */
4715 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4716 if (req
->r_wrongsec
[i
] != RPCAUTH_INVALID
) {
4717 if (!srvcount
) { /* no server list, just try it */
4720 /* check that it's in the server's list */
4721 for (j
= 0; j
< srvcount
; j
++) {
4722 if (req
->r_wrongsec
[i
] == srvflavors
[j
]) {
4726 if (j
< srvcount
) { /* found */
4729 /* not found in server list */
4730 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4733 if (i
== NX_MAX_SEC_FLAVORS
) {
4734 /* nothing left to try! */
4739 /* retry with the next auth flavor */
4740 req
->r_auth
= req
->r_wrongsec
[i
];
4741 req
->r_xid
= 0; // get a new XID
4742 req
->r_flags
|= R_RESTART
;
4744 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_WRONGSEC
);
4747 if ((nmp
->nm_vers
>= NFS_VER4
) && req
->r_wrongsec
) {
4749 * We renegotiated security for this request; so update the
4750 * default security flavor for the associated node.
4753 req
->r_np
->n_auth
= req
->r_auth
;
4756 #endif /* CONFIG_NFS4 */
4757 if (*status
== NFS_OK
) {
4759 * Successful NFS request
4762 req
->r_nmrep
.nmc_mhead
= NULL
;
4765 /* Got an NFS error of some kind */
4768 * If the File Handle was stale, invalidate the
4769 * lookup cache, just in case.
4771 if ((*status
== ESTALE
) && req
->r_np
) {
4772 cache_purge(NFSTOV(req
->r_np
));
4773 /* if monitored, also send delete event */
4774 if (vnode_ismonitored(NFSTOV(req
->r_np
))) {
4775 nfs_vnode_notify(req
->r_np
, (VNODE_EVENT_ATTRIB
| VNODE_EVENT_DELETE
));
4778 if (nmp
->nm_vers
== NFS_VER2
) {
4783 req
->r_nmrep
.nmc_mhead
= NULL
;
4786 case RPC_PROGUNAVAIL
:
4787 error
= EPROGUNAVAIL
;
4789 case RPC_PROGMISMATCH
:
4790 error
= ERPCMISMATCH
;
4792 case RPC_PROCUNAVAIL
:
4793 error
= EPROCUNAVAIL
;
4798 case RPC_SYSTEM_ERR
:
4804 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4805 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4806 lck_mtx_lock(&nmp
->nm_lock
);
4808 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4809 lck_mtx_unlock(&nmp
->nm_lock
);
4811 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4814 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
,
4815 (!error
&& (*status
== NFS_OK
)) ? 0xf0f0f0f0 : error
);
4820 * NFS request using a GSS/Kerberos security flavor?
4823 nfs_request_using_gss(struct nfsreq
*req
)
4825 if (!req
->r_gss_ctx
) {
4828 switch (req
->r_auth
) {
4838 * Perform an NFS request synchronously.
4844 mount_t mp
, /* used only if !np */
4845 struct nfsm_chain
*nmrest
,
4848 struct nfsreq_secinfo_args
*si
,
4849 struct nfsm_chain
*nmrepp
,
4853 return nfs_request2(np
, mp
, nmrest
, procnum
,
4854 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4855 si
, 0, nmrepp
, xidp
, status
);
4861 mount_t mp
, /* used only if !np */
4862 struct nfsm_chain
*nmrest
,
4866 struct nfsreq_secinfo_args
*si
,
4868 struct nfsm_chain
*nmrepp
,
4875 req
= zalloc_flags(nfs_req_zone
, Z_WAITOK
);
4876 if ((error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, &req
))) {
4879 req
->r_flags
|= (flags
& (R_OPTMASK
| R_SOFT
));
4881 req
->r_secinfo
= *si
;
4884 FSDBG_TOP(273, R_XID32(req
->r_xid
), np
, procnum
, 0);
4887 req
->r_flags
&= ~R_RESTART
;
4888 if ((error
= nfs_request_add_header(req
))) {
4894 if ((error
= nfs_request_send(req
, 1))) {
4897 nfs_request_wait(req
);
4898 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4901 } while (req
->r_flags
& R_RESTART
);
4903 FSDBG_BOT(273, R_XID32(req
->r_xid
), np
, procnum
, error
);
4904 nfs_request_rele(req
);
4906 NFS_ZFREE(nfs_req_zone
, req
);
4913 * Set up a new null proc request to exchange GSS context tokens with the
4914 * server. Associate the context that we are setting up with the request that we
4921 struct nfsm_chain
*nmrest
,
4925 struct nfs_gss_clnt_ctx
*cp
, /* Set to gss context to renew or setup */
4926 struct nfsm_chain
*nmrepp
,
4930 int error
, wait
= 1;
4932 req
= zalloc_flags(nfs_req_zone
, Z_WAITOK
);
4933 if ((error
= nfs_request_create(NULL
, mp
, nmrest
, NFSPROC_NULL
, thd
, cred
, &req
))) {
4936 req
->r_flags
|= (flags
& R_OPTMASK
);
4939 printf("nfs_request_gss request has no context\n");
4940 nfs_request_rele(req
);
4941 error
= NFSERR_EAUTH
;
4944 nfs_gss_clnt_ctx_ref(req
, cp
);
4947 * Don't wait for a reply to a context destroy advisory
4948 * to avoid hanging on a dead server.
4950 if (cp
->gss_clnt_proc
== RPCSEC_GSS_DESTROY
) {
4954 FSDBG_TOP(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, 0);
4957 req
->r_flags
&= ~R_RESTART
;
4958 if ((error
= nfs_request_add_header(req
))) {
4962 if ((error
= nfs_request_send(req
, wait
))) {
4969 nfs_request_wait(req
);
4970 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4973 } while (req
->r_flags
& R_RESTART
);
4975 FSDBG_BOT(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, error
);
4977 nfs_gss_clnt_ctx_unref(req
);
4978 nfs_request_rele(req
);
4980 NFS_ZFREE(nfs_req_zone
, req
);
4983 #endif /* CONFIG_NFS_GSS */
4986 * Create and start an asynchronous NFS request.
4991 mount_t mp
, /* used only if !np */
4992 struct nfsm_chain
*nmrest
,
4996 struct nfsreq_secinfo_args
*si
,
4998 struct nfsreq_cbinfo
*cb
,
4999 struct nfsreq
**reqp
)
5002 struct nfsmount
*nmp
;
5005 error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, reqp
);
5007 FSDBG(274, (req
? R_XID32(req
->r_xid
) : 0), np
, procnum
, error
);
5011 req
->r_flags
|= (flags
& R_OPTMASK
);
5012 req
->r_flags
|= R_ASYNC
;
5014 req
->r_secinfo
= *si
;
5017 req
->r_callback
= *cb
;
5019 error
= nfs_request_add_header(req
);
5021 req
->r_flags
|= R_WAITSENT
;
5022 if (req
->r_callback
.rcb_func
) {
5023 nfs_request_ref(req
, 0);
5025 error
= nfs_request_send(req
, 1);
5026 lck_mtx_lock(&req
->r_mtx
);
5027 if (!error
&& !(req
->r_flags
& R_SENT
) && req
->r_callback
.rcb_func
) {
5028 /* make sure to wait until this async I/O request gets sent */
5029 int slpflag
= (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
5030 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
5031 while (!(req
->r_flags
& R_SENT
)) {
5033 if ((req
->r_flags
& R_RESENDQ
) && !nfs_mount_gone(nmp
)) {
5034 lck_mtx_lock(&nmp
->nm_lock
);
5035 if ((req
->r_flags
& R_RESENDQ
) && (nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
5037 * It's not going to get off the resend queue if we're in recovery.
5038 * So, just take it off ourselves. We could be holding mount state
5039 * busy and thus holding up the start of recovery.
5041 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
5042 req
->r_flags
&= ~R_RESENDQ
;
5043 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
5044 lck_mtx_unlock(&nmp
->nm_lock
);
5045 req
->r_flags
|= R_SENDING
;
5046 lck_mtx_unlock(&req
->r_mtx
);
5047 error
= nfs_send(req
, 1);
5048 /* Remove the R_RESENDQ reference */
5049 nfs_request_rele(req
);
5050 lck_mtx_lock(&req
->r_mtx
);
5056 lck_mtx_unlock(&nmp
->nm_lock
);
5058 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
5061 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitsent", &ts
);
5065 sent
= req
->r_flags
& R_SENT
;
5066 lck_mtx_unlock(&req
->r_mtx
);
5067 if (error
&& req
->r_callback
.rcb_func
&& !sent
) {
5068 nfs_request_rele(req
);
5071 FSDBG(274, R_XID32(req
->r_xid
), np
, procnum
, error
);
5072 if (error
|| req
->r_callback
.rcb_func
) {
5073 nfs_request_rele(req
);
5080 * Wait for and finish an asynchronous NFS request.
5083 nfs_request_async_finish(
5085 struct nfsm_chain
*nmrepp
,
5089 int error
= 0, asyncio
= req
->r_callback
.rcb_func
? 1 : 0;
5090 struct nfsmount
*nmp
;
5092 lck_mtx_lock(&req
->r_mtx
);
5094 req
->r_flags
|= R_ASYNCWAIT
;
5096 while (req
->r_flags
& R_RESENDQ
) { /* wait until the request is off the resend queue */
5097 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
5099 if ((nmp
= req
->r_nmp
)) {
5100 lck_mtx_lock(&nmp
->nm_lock
);
5101 if ((req
->r_flags
& R_RESENDQ
) && (nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
5103 * It's not going to get off the resend queue if we're in recovery.
5104 * So, just take it off ourselves. We could be holding mount state
5105 * busy and thus holding up the start of recovery.
5107 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
5108 req
->r_flags
&= ~R_RESENDQ
;
5109 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
5110 /* Remove the R_RESENDQ reference */
5111 assert(req
->r_refs
> 0);
5113 lck_mtx_unlock(&nmp
->nm_lock
);
5116 lck_mtx_unlock(&nmp
->nm_lock
);
5118 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
5121 msleep(req
, &req
->r_mtx
, PZERO
- 1, "nfsresendqwait", &ts
);
5123 lck_mtx_unlock(&req
->r_mtx
);
5126 nfs_request_wait(req
);
5127 error
= nfs_request_finish(req
, nmrepp
, status
);
5130 while (!error
&& (req
->r_flags
& R_RESTART
)) {
5132 assert(req
->r_achain
.tqe_next
== NFSREQNOLIST
);
5133 lck_mtx_lock(&req
->r_mtx
);
5134 req
->r_flags
&= ~R_IOD
;
5135 if (req
->r_resendtime
) { /* send later */
5136 nfs_asyncio_resend(req
);
5137 lck_mtx_unlock(&req
->r_mtx
);
5140 lck_mtx_unlock(&req
->r_mtx
);
5143 req
->r_flags
&= ~R_RESTART
;
5144 if ((error
= nfs_request_add_header(req
))) {
5147 if ((error
= nfs_request_send(req
, !asyncio
))) {
5153 nfs_request_wait(req
);
5154 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
5162 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, error
);
5163 nfs_request_rele(req
);
5168 * Cancel a pending asynchronous NFS request.
5171 nfs_request_async_cancel(struct nfsreq
*req
)
5173 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, 0xD1ED1E);
5174 nfs_request_rele(req
);
5178 * Flag a request as being terminated.
5181 nfs_softterm(struct nfsreq
*req
)
5183 struct nfsmount
*nmp
= req
->r_nmp
;
5184 req
->r_flags
|= R_SOFTTERM
;
5185 req
->r_error
= ETIMEDOUT
;
5186 if (!(req
->r_flags
& R_CWND
) || nfs_mount_gone(nmp
)) {
5189 /* update congestion window */
5190 req
->r_flags
&= ~R_CWND
;
5191 lck_mtx_lock(&nmp
->nm_lock
);
5192 FSDBG(532, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
5193 nmp
->nm_sent
-= NFS_CWNDSCALE
;
5194 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
5195 /* congestion window is open, poke the cwnd queue */
5196 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
5197 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
5198 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
5201 lck_mtx_unlock(&nmp
->nm_lock
);
5205 * Ensure req isn't in use by the timer, then dequeue it.
5208 nfs_reqdequeue(struct nfsreq
*req
)
5210 lck_mtx_lock(&nfs_request_mutex
);
5211 while (req
->r_lflags
& RL_BUSY
) {
5212 req
->r_lflags
|= RL_WAITING
;
5213 msleep(&req
->r_lflags
, &nfs_request_mutex
, PSOCK
, "reqdeq", NULL
);
5215 if (req
->r_lflags
& RL_QUEUED
) {
5216 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
5217 req
->r_lflags
&= ~RL_QUEUED
;
5219 lck_mtx_unlock(&nfs_request_mutex
);
5223 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5224 * free()'d out from under it.
5227 nfs_reqbusy(struct nfsreq
*req
)
5229 if (req
->r_lflags
& RL_BUSY
) {
5230 panic("req locked");
5232 req
->r_lflags
|= RL_BUSY
;
5236 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5239 nfs_reqnext(struct nfsreq
*req
)
5241 struct nfsreq
* nextreq
;
5247 * We need to get and busy the next req before signalling the
5248 * current one, otherwise wakeup() may block us and we'll race to
5249 * grab the next req.
5251 nextreq
= TAILQ_NEXT(req
, r_chain
);
5252 if (nextreq
!= NULL
) {
5253 nfs_reqbusy(nextreq
);
5255 /* unbusy and signal. */
5256 req
->r_lflags
&= ~RL_BUSY
;
5257 if (req
->r_lflags
& RL_WAITING
) {
5258 req
->r_lflags
&= ~RL_WAITING
;
5259 wakeup(&req
->r_lflags
);
5265 * NFS request queue timer routine
5267 * Scan the NFS request queue for any requests that have timed out.
5269 * Alert the system of unresponsive servers.
5270 * Mark expired requests on soft mounts as terminated.
5271 * For UDP, mark/signal requests for retransmission.
5274 nfs_request_timer(__unused
void *param0
, __unused
void *param1
)
5277 struct nfsmount
*nmp
;
5278 int timeo
, maxtime
, finish_asyncio
, error
;
5280 TAILQ_HEAD(nfs_mount_pokeq
, nfsmount
) nfs_mount_poke_queue
;
5281 TAILQ_INIT(&nfs_mount_poke_queue
);
5284 lck_mtx_lock(&nfs_request_mutex
);
5285 req
= TAILQ_FIRST(&nfs_reqq
);
5286 if (req
== NULL
) { /* no requests - turn timer off */
5287 nfs_request_timer_on
= 0;
5288 lck_mtx_unlock(&nfs_request_mutex
);
5295 for (; req
!= NULL
; req
= nfs_reqnext(req
)) {
5298 NFS_SOCK_DBG("Found a request with out a mount!\n");
5301 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
5304 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0))) {
5305 if (req
->r_callback
.rcb_func
!= NULL
) {
5306 /* async I/O RPC needs to be finished */
5307 lck_mtx_lock(&req
->r_mtx
);
5308 req
->r_error
= error
;
5309 finish_asyncio
= !(req
->r_flags
& R_WAITSENT
);
5311 lck_mtx_unlock(&req
->r_mtx
);
5312 if (finish_asyncio
) {
5313 nfs_asyncio_finish(req
);
5319 lck_mtx_lock(&req
->r_mtx
);
5321 if (nmp
->nm_tprintf_initial_delay
&&
5322 ((req
->r_rexmit
> 2) || (req
->r_flags
& R_RESENDERR
)) &&
5323 ((req
->r_lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
)) {
5324 req
->r_lastmsg
= now
.tv_sec
;
5325 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5326 "not responding", 1);
5327 req
->r_flags
|= R_TPRINTFMSG
;
5328 lck_mtx_lock(&nmp
->nm_lock
);
5329 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
5330 lck_mtx_unlock(&nmp
->nm_lock
);
5331 /* we're not yet completely mounted and */
5332 /* we can't complete an RPC, so we fail */
5333 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5335 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5337 lck_mtx_unlock(&req
->r_mtx
);
5338 if (finish_asyncio
) {
5339 nfs_asyncio_finish(req
);
5343 lck_mtx_unlock(&nmp
->nm_lock
);
5347 * Put a reasonable limit on the maximum timeout,
5348 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5350 if (!(NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && !nfs_can_squish(nmp
)) {
5351 maxtime
= NFS_MAXTIMEO
;
5352 } else if ((req
->r_flags
& (R_SETUP
| R_RECOVER
)) ||
5353 ((nmp
->nm_reconnect_start
<= 0) || ((now
.tv_sec
- nmp
->nm_reconnect_start
) < 8))) {
5354 maxtime
= (NFS_MAXTIMEO
/ (nmp
->nm_timeouts
+ 1)) / 2;
5356 maxtime
= NFS_MINTIMEO
/ 4;
5360 * Check for request timeout.
5362 if (req
->r_rtt
>= 0) {
5364 lck_mtx_lock(&nmp
->nm_lock
);
5365 if (req
->r_flags
& R_RESENDERR
) {
5366 /* with resend errors, retry every few seconds */
5369 if (req
->r_procnum
== NFSPROC_NULL
&& req
->r_gss_ctx
!= NULL
) {
5370 timeo
= NFS_MINIDEMTIMEO
; // gss context setup
5371 } else if (NMFLAG(nmp
, DUMBTIMER
)) {
5372 timeo
= nmp
->nm_timeo
;
5374 timeo
= NFS_RTO(nmp
, proct
[req
->r_procnum
]);
5377 /* ensure 62.5 ms floor */
5378 while (16 * timeo
< hz
) {
5381 if (nmp
->nm_timeouts
> 0) {
5382 timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
5385 /* limit timeout to max */
5386 if (timeo
> maxtime
) {
5389 if (req
->r_rtt
<= timeo
) {
5390 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req
->r_rtt
, timeo
);
5391 lck_mtx_unlock(&nmp
->nm_lock
);
5392 lck_mtx_unlock(&req
->r_mtx
);
5395 /* The request has timed out */
5396 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5397 req
->r_procnum
, proct
[req
->r_procnum
],
5398 req
->r_xid
, req
->r_rtt
, timeo
, nmp
->nm_timeouts
,
5399 (now
.tv_sec
- req
->r_start
) * NFS_HZ
, maxtime
);
5400 if (nmp
->nm_timeouts
< 8) {
5403 if (nfs_mount_check_dead_timeout(nmp
)) {
5404 /* Unbusy this request */
5405 req
->r_lflags
&= ~RL_BUSY
;
5406 if (req
->r_lflags
& RL_WAITING
) {
5407 req
->r_lflags
&= ~RL_WAITING
;
5408 wakeup(&req
->r_lflags
);
5410 lck_mtx_unlock(&req
->r_mtx
);
5412 /* No need to poke this mount */
5413 if (nmp
->nm_sockflags
& NMSOCK_POKE
) {
5414 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
5415 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5417 /* Release our lock state, so we can become a zombie */
5418 lck_mtx_unlock(&nfs_request_mutex
);
5421 * Note nfs_mount_make zombie(nmp) must be
5422 * called with nm_lock held. After doing some
5423 * work we release nm_lock in
5424 * nfs_make_mount_zombie with out acquiring any
5425 * other locks. (Later, in nfs_mount_zombie we
5426 * will acquire &nfs_request_mutex, r_mtx,
5427 * nm_lock in that order). So we should not be
5428 * introducing deadlock here. We take a reference
5429 * on the mount so that its still there when we
5433 nfs_mount_make_zombie(nmp
);
5434 lck_mtx_unlock(&nmp
->nm_lock
);
5435 nfs_mount_rele(nmp
);
5438 * All the request for this mount have now been
5439 * removed from the request queue. Restart to
5440 * process the remaining mounts
5445 /* if it's been a few seconds, try poking the socket */
5446 if ((nmp
->nm_sotype
== SOCK_STREAM
) &&
5447 ((now
.tv_sec
- req
->r_start
) >= 3) &&
5448 !(nmp
->nm_sockflags
& (NMSOCK_POKE
| NMSOCK_UNMOUNT
)) &&
5449 (nmp
->nm_sockflags
& NMSOCK_READY
)) {
5450 nmp
->nm_sockflags
|= NMSOCK_POKE
;
5452 * We take a ref on the mount so that we know the mount will still be there
5453 * when we process the nfs_mount_poke_queue. An unmount request will block
5454 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5455 * the reference after calling nfs_sock_poke below;
5458 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5460 lck_mtx_unlock(&nmp
->nm_lock
);
5463 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5464 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& (R_SETUP
| R_RECOVER
| R_SOFT
))) &&
5465 ((req
->r_rexmit
>= req
->r_retry
) || /* too many */
5466 ((now
.tv_sec
- req
->r_start
) * NFS_HZ
> maxtime
))) { /* too long */
5467 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5468 lck_mtx_lock(&nmp
->nm_lock
);
5469 if (!(nmp
->nm_state
& NFSSTA_TIMEO
)) {
5470 lck_mtx_unlock(&nmp
->nm_lock
);
5471 /* make sure we note the unresponsive server */
5472 /* (maxtime may be less than tprintf delay) */
5473 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5474 "not responding", 1);
5475 req
->r_lastmsg
= now
.tv_sec
;
5476 req
->r_flags
|= R_TPRINTFMSG
;
5478 lck_mtx_unlock(&nmp
->nm_lock
);
5480 if (req
->r_flags
& R_NOINTR
) {
5481 /* don't terminate nointr requests on timeout */
5482 lck_mtx_unlock(&req
->r_mtx
);
5485 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5486 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
,
5487 now
.tv_sec
- req
->r_start
);
5489 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5491 lck_mtx_unlock(&req
->r_mtx
);
5492 if (finish_asyncio
) {
5493 nfs_asyncio_finish(req
);
5498 /* for TCP, only resend if explicitly requested */
5499 if ((nmp
->nm_sotype
== SOCK_STREAM
) && !(req
->r_flags
& R_MUSTRESEND
)) {
5500 if (++req
->r_rexmit
> NFS_MAXREXMIT
) {
5501 req
->r_rexmit
= NFS_MAXREXMIT
;
5504 lck_mtx_unlock(&req
->r_mtx
);
5509 * The request needs to be (re)sent. Kick the requester to resend it.
5510 * (unless it's already marked as needing a resend)
5512 if ((req
->r_flags
& R_MUSTRESEND
) && (req
->r_rtt
== -1)) {
5513 lck_mtx_unlock(&req
->r_mtx
);
5516 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5517 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
5518 req
->r_flags
|= R_MUSTRESEND
;
5521 if ((req
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
5522 nfs_asyncio_resend(req
);
5524 lck_mtx_unlock(&req
->r_mtx
);
5527 lck_mtx_unlock(&nfs_request_mutex
);
5529 /* poke any sockets */
5530 while ((nmp
= TAILQ_FIRST(&nfs_mount_poke_queue
))) {
5531 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5533 nfs_mount_rele(nmp
);
5536 nfs_interval_timer_start(nfs_request_timer_call
, NFS_REQUESTDELAY
);
5540 * check a thread's proc for the "noremotehang" flag.
5543 nfs_noremotehang(thread_t thd
)
5545 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5546 return p
&& proc_noremotehang(p
);
5550 * Test for a termination condition pending on the process.
5551 * This is used to determine if we need to bail on a mount.
5552 * ETIMEDOUT is returned if there has been a soft timeout.
5553 * EINTR is returned if there is a signal pending that is not being ignored
5554 * ESHUTDOWN is return if the system is in shutdown.
5555 * and the mount is interruptable, or if we are a thread that is in the process
5556 * of cancellation (also SIGKILL posted).
5558 extern int sigprop
[NSIG
+ 1];
5560 nfs_sigintr(struct nfsmount
*nmp
, struct nfsreq
*req
, thread_t thd
, int nmplocked
)
5569 if (get_system_inshutdown()) {
5570 NFS_SOCK_DBG("Shutdown in progress\n");
5574 if (req
&& (req
->r_flags
& R_SOFTTERM
)) {
5575 return ETIMEDOUT
; /* request has been terminated. */
5577 if (req
&& (req
->r_flags
& R_NOINTR
)) {
5578 thd
= NULL
; /* don't check for signal on R_NOINTR */
5581 lck_mtx_lock(&nmp
->nm_lock
);
5583 if (nmp
->nm_state
& NFSSTA_FORCE
) {
5584 /* If a force unmount is in progress then fail. */
5586 } else if (vfs_isforce(nmp
->nm_mountp
)) {
5587 /* Someone is unmounting us, go soft and mark it. */
5588 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_SOFT
);
5589 nmp
->nm_state
|= NFSSTA_FORCE
;
5592 /* Check if the mount is marked dead. */
5593 if (!error
&& (nmp
->nm_state
& NFSSTA_DEAD
)) {
5598 * If the mount is hung and we've requested not to hang
5599 * on remote filesystems, then bail now.
5601 if (current_proc() != kernproc
&&
5602 !error
&& (nmp
->nm_state
& NFSSTA_TIMEO
) && nfs_noremotehang(thd
)) {
5607 lck_mtx_unlock(&nmp
->nm_lock
);
5613 /* may not have a thread for async I/O */
5614 if (thd
== NULL
|| current_proc() == kernproc
) {
5619 * Check if the process is aborted, but don't interrupt if we
5620 * were killed by a signal and this is the exiting thread which
5621 * is attempting to dump core.
5623 if (((p
= current_proc()) != kernproc
) && current_thread_aborted() &&
5624 (!(p
->p_acflag
& AXSIG
) || (p
->exit_thread
!= current_thread()) ||
5625 (p
->p_sigacts
== NULL
) ||
5626 (p
->p_sigacts
->ps_sig
< 1) || (p
->p_sigacts
->ps_sig
> NSIG
) ||
5627 !(sigprop
[p
->p_sigacts
->ps_sig
] & SA_CORE
))) {
5631 /* mask off thread and process blocked signals. */
5632 if (NMFLAG(nmp
, INTR
) && ((p
= get_bsdthreadtask_info(thd
))) &&
5633 proc_pendingsignals(p
, NFSINT_SIGMASK
)) {
5640 * Lock a socket against others.
5641 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5642 * and also to avoid race conditions between the processes with nfs requests
5643 * in progress when a reconnect is necessary.
5646 nfs_sndlock(struct nfsreq
*req
)
5648 struct nfsmount
*nmp
= req
->r_nmp
;
5650 int error
= 0, slpflag
= 0;
5651 struct timespec ts
= { .tv_sec
= 0, .tv_nsec
= 0 };
5653 if (nfs_mount_gone(nmp
)) {
5657 lck_mtx_lock(&nmp
->nm_lock
);
5658 statep
= &nmp
->nm_state
;
5660 if (NMFLAG(nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
5663 while (*statep
& NFSSTA_SNDLOCK
) {
5664 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 1))) {
5667 *statep
|= NFSSTA_WANTSND
;
5668 if (nfs_noremotehang(req
->r_thread
)) {
5671 msleep(statep
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsndlck", &ts
);
5672 if (slpflag
== PCATCH
) {
5678 *statep
|= NFSSTA_SNDLOCK
;
5680 lck_mtx_unlock(&nmp
->nm_lock
);
5685 * Unlock the stream socket for others.
5688 nfs_sndunlock(struct nfsreq
*req
)
5690 struct nfsmount
*nmp
= req
->r_nmp
;
5691 int *statep
, wake
= 0;
5696 lck_mtx_lock(&nmp
->nm_lock
);
5697 statep
= &nmp
->nm_state
;
5698 if ((*statep
& NFSSTA_SNDLOCK
) == 0) {
5699 panic("nfs sndunlock");
5701 *statep
&= ~(NFSSTA_SNDLOCK
| NFSSTA_SENDING
);
5702 if (*statep
& NFSSTA_WANTSND
) {
5703 *statep
&= ~NFSSTA_WANTSND
;
5706 lck_mtx_unlock(&nmp
->nm_lock
);
5714 struct nfsmount
*nmp
,
5716 struct sockaddr
*saddr
,
5723 struct nfsm_chain
*nmrep
)
5725 int error
= 0, on
= 1, try, sendat
= 2, soproto
, recv
, optlen
, restoreto
= 0;
5726 socket_t newso
= NULL
;
5727 struct sockaddr_storage ss
;
5728 struct timeval orig_rcvto
, orig_sndto
, tv
= { .tv_sec
= 1, .tv_usec
= 0 };
5729 mbuf_t m
, mrep
= NULL
;
5731 uint32_t rxid
= 0, reply
= 0, reply_status
, rejected_status
;
5732 uint32_t verf_type
, verf_len
, accepted_status
;
5733 size_t readlen
, sentlen
;
5734 struct nfs_rpc_record_state nrrs
;
5737 /* create socket and set options */
5738 if (saddr
->sa_family
== AF_LOCAL
) {
5741 soproto
= (sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
;
5743 if ((error
= sock_socket(saddr
->sa_family
, sotype
, soproto
, NULL
, NULL
, &newso
))) {
5747 if (bindresv
&& saddr
->sa_family
!= AF_LOCAL
) {
5748 int level
= (saddr
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
5749 int optname
= (saddr
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
5750 int portrange
= IP_PORTRANGE_LOW
;
5751 error
= sock_setsockopt(newso
, level
, optname
, &portrange
, sizeof(portrange
));
5753 ss
.ss_len
= saddr
->sa_len
;
5754 ss
.ss_family
= saddr
->sa_family
;
5755 if (ss
.ss_family
== AF_INET
) {
5756 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
5757 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
5758 } else if (ss
.ss_family
== AF_INET6
) {
5759 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
5760 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
5765 error
= sock_bind(newso
, (struct sockaddr
*)&ss
);
5770 if (sotype
== SOCK_STREAM
) {
5771 # define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5774 error
= sock_connect(newso
, saddr
, MSG_DONTWAIT
);
5775 if (error
== EINPROGRESS
) {
5780 while ((error
= sock_connectwait(newso
, &tv
)) == EINPROGRESS
) {
5781 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5782 if (++count
>= NFS_AUX_CONNECTION_TIMEOUT
) {
5789 if (((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
)))) ||
5790 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
)))) ||
5791 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
))))) {
5796 /* make sure socket is using a one second timeout in this function */
5797 optlen
= sizeof(orig_rcvto
);
5798 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, &optlen
);
5800 optlen
= sizeof(orig_sndto
);
5801 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, &optlen
);
5804 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
));
5805 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
));
5810 if (sotype
== SOCK_STREAM
) {
5811 sendat
= 0; /* we only resend the request for UDP */
5812 nfs_rpc_record_state_init(&nrrs
);
5815 for (try = 0; try < timeo
; try++) {
5816 if ((error
= nfs_sigintr(nmp
, NULL
, !try ? NULL
: thd
, 0))) {
5819 if (!try || (try == sendat
)) {
5820 /* send the request (resending periodically for UDP) */
5821 if ((error
= mbuf_copym(mreq
, 0, MBUF_COPYALL
, MBUF_WAITOK
, &m
))) {
5824 bzero(&msg
, sizeof(msg
));
5825 if ((sotype
== SOCK_DGRAM
) && !sock_isconnected(so
)) {
5826 msg
.msg_name
= saddr
;
5827 msg
.msg_namelen
= saddr
->sa_len
;
5829 if ((error
= sock_sendmbuf(so
, &msg
, m
, 0, &sentlen
))) {
5837 /* wait for the response */
5838 if (sotype
== SOCK_STREAM
) {
5839 /* try to read (more of) record */
5840 error
= nfs_rpc_record_read(so
, &nrrs
, 0, &recv
, &mrep
);
5841 /* if we don't have the whole record yet, we'll keep trying */
5844 bzero(&msg
, sizeof(msg
));
5845 error
= sock_receivembuf(so
, &msg
, &mrep
, 0, &readlen
);
5847 if (error
== EWOULDBLOCK
) {
5851 /* parse the response */
5852 nfsm_chain_dissect_init(error
, nmrep
, mrep
);
5853 nfsm_chain_get_32(error
, nmrep
, rxid
);
5854 nfsm_chain_get_32(error
, nmrep
, reply
);
5856 if ((rxid
!= xid
) || (reply
!= RPC_REPLY
)) {
5859 nfsm_chain_get_32(error
, nmrep
, reply_status
);
5861 if (reply_status
== RPC_MSGDENIED
) {
5862 nfsm_chain_get_32(error
, nmrep
, rejected_status
);
5864 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
5867 nfsm_chain_get_32(error
, nmrep
, verf_type
); /* verifier flavor */
5868 nfsm_chain_get_32(error
, nmrep
, verf_len
); /* verifier length */
5871 nfsm_chain_adv(error
, nmrep
, nfsm_rndup(verf_len
));
5873 nfsm_chain_get_32(error
, nmrep
, accepted_status
);
5875 switch (accepted_status
) {
5879 case RPC_PROGUNAVAIL
:
5880 error
= EPROGUNAVAIL
;
5882 case RPC_PROGMISMATCH
:
5883 error
= EPROGMISMATCH
;
5885 case RPC_PROCUNAVAIL
:
5886 error
= EPROCUNAVAIL
;
5891 case RPC_SYSTEM_ERR
:
5900 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, sizeof(tv
));
5901 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, sizeof(tv
));
5904 sock_shutdown(newso
, SHUT_RDWR
);
5913 struct nfsmount
*nmp
,
5915 struct sockaddr
*sa
,
5922 thread_t thd
= vfs_context_thread(ctx
);
5923 kauth_cred_t cred
= vfs_context_ucred(ctx
);
5924 struct sockaddr_storage ss
;
5925 struct sockaddr
*saddr
= (struct sockaddr
*)&ss
;
5926 static struct sockaddr_un rpcbind_cots
= {
5927 sizeof(struct sockaddr_un
),
5931 static struct sockaddr_un rpcbind_clts
= {
5932 sizeof(struct sockaddr_un
),
5936 struct nfsm_chain nmreq
, nmrep
;
5938 int error
= 0, ip
, pmprog
, pmvers
, pmproc
;
5939 uint32_t ualen
= 0, scopeid
= 0, port32
;
5941 char uaddr
[MAX_IPv6_STR_LEN
+ 16];
5943 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5944 if (saddr
->sa_family
== AF_INET
) {
5948 pmproc
= PMAPPROC_GETPORT
;
5949 } else if (saddr
->sa_family
== AF_INET6
) {
5953 pmproc
= RPCBPROC_GETVERSADDR
;
5954 } else if (saddr
->sa_family
== AF_LOCAL
) {
5958 pmproc
= RPCBPROC_GETVERSADDR
;
5959 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un
*)sa
)->sun_path
);
5960 saddr
= (struct sockaddr
*)((stype
== SOCK_STREAM
) ? &rpcbind_cots
: &rpcbind_clts
);
5964 nfsm_chain_null(&nmreq
);
5965 nfsm_chain_null(&nmrep
);
5968 /* send portmapper request to get port/uaddr */
5970 ((struct sockaddr_in
*)saddr
)->sin_port
= htons(PMAPPORT
);
5971 } else if (ip
== 6) {
5972 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(PMAPPORT
);
5974 nfsm_chain_build_alloc_init(error
, &nmreq
, 8 * NFSX_UNSIGNED
);
5975 nfsm_chain_add_32(error
, &nmreq
, protocol
);
5976 nfsm_chain_add_32(error
, &nmreq
, vers
);
5978 nfsm_chain_add_32(error
, &nmreq
, stype
== SOCK_STREAM
? IPPROTO_TCP
: IPPROTO_UDP
);
5979 nfsm_chain_add_32(error
, &nmreq
, 0);
5981 if (stype
== SOCK_STREAM
) {
5983 nfsm_chain_add_string(error
, &nmreq
, "tcp6", 4);
5985 nfsm_chain_add_string(error
, &nmreq
, "ticotsord", 9);
5989 nfsm_chain_add_string(error
, &nmreq
, "udp6", 4);
5991 nfsm_chain_add_string(error
, &nmreq
, "ticlts", 6);
5994 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* uaddr */
5995 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* owner */
5997 nfsm_chain_build_done(error
, &nmreq
);
5999 error
= nfsm_rpchead2(nmp
, stype
, pmprog
, pmvers
, pmproc
,
6000 RPCAUTH_SYS
, cred
, NULL
, nmreq
.nmc_mhead
, &xid
, &mreq
);
6002 nmreq
.nmc_mhead
= NULL
;
6004 NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq
);
6005 error
= nfs_aux_request(nmp
, thd
, saddr
, so
,
6006 stype
, mreq
, R_XID32(xid
), 0, timeo
, &nmrep
);
6007 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep
.nmc_mhead
);
6008 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error
, protocol
, vers
,
6009 (saddr
->sa_family
== AF_LOCAL
) ? ((struct sockaddr_un
*)saddr
)->sun_path
:
6010 (saddr
->sa_family
== AF_INET6
) ? "INET6 socket" : "INET socket");
6012 /* grab port from portmap response */
6014 nfsm_chain_get_32(error
, &nmrep
, port32
);
6016 if (NFS_PORT_INVALID(port32
)) {
6019 ((struct sockaddr_in
*)sa
)->sin_port
= htons((in_port_t
)port32
);
6023 /* get uaddr string and convert to sockaddr */
6024 nfsm_chain_get_32(error
, &nmrep
, ualen
);
6026 if (ualen
> (sizeof(uaddr
) - 1)) {
6030 /* program is not available, just return a zero port */
6031 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
6033 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(0);
6035 ((struct sockaddr_un
*)saddr
)->sun_path
[0] = '\0';
6037 NFS_SOCK_DBG("Program %u version %u unavailable", protocol
, vers
);
6039 nfsm_chain_get_opaque(error
, &nmrep
, ualen
, uaddr
);
6040 NFS_SOCK_DBG("Got uaddr %s\n", uaddr
);
6042 uaddr
[ualen
] = '\0';
6044 scopeid
= ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
;
6046 if (!nfs_uaddr2sockaddr(uaddr
, saddr
)) {
6049 if (ip
== 6 && scopeid
!= ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
) {
6050 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
, scopeid
);
6051 ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
= scopeid
;
6056 if ((error
== EPROGMISMATCH
) || (error
== EPROCUNAVAIL
) || (error
== EIO
) || (error
== EBADRPC
)) {
6057 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6058 if (pmvers
== RPCBVERS4
) {
6059 /* fall back to v3 and GETADDR */
6061 pmproc
= RPCBPROC_GETADDR
;
6062 nfsm_chain_cleanup(&nmreq
);
6063 nfsm_chain_cleanup(&nmrep
);
6064 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
6071 bcopy(saddr
, sa
, min(saddr
->sa_len
, sa
->sa_len
));
6075 nfsm_chain_cleanup(&nmreq
);
6076 nfsm_chain_cleanup(&nmrep
);
6077 NFS_SOCK_DBG("Returned %d\n", error
);
6083 nfs_msg(thread_t thd
,
6088 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
6092 tpr
= tprintf_open(p
);
6097 tprintf(tpr
, "nfs server %s: %s, error %d\n", server
, msg
, error
);
6099 tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);
6105 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
6106 #define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */
6107 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
6108 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
6109 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
6111 uint32_t nfs_squishy_flags
= NFS_SQUISH_MOBILE_ONLY
| NFS_SQUISH_AUTOMOUNTED_ONLY
| NFS_SQUISH_QUICK
;
6112 uint32_t nfs_tcp_sockbuf
= 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
6113 int32_t nfs_is_mobile
;
6115 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
6116 #define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/
6119 * Could this mount be squished?
6122 nfs_can_squish(struct nfsmount
*nmp
)
6124 uint64_t flags
= vfs_flags(nmp
->nm_mountp
);
6125 int softsquish
= ((nfs_squishy_flags
& NFS_SQUISH_SOFT
) & NMFLAG(nmp
, SOFT
));
6127 if (!softsquish
&& (nfs_squishy_flags
& NFS_SQUISH_MOBILE_ONLY
) && nfs_is_mobile
== 0) {
6131 if ((nfs_squishy_flags
& NFS_SQUISH_AUTOMOUNTED_ONLY
) && (flags
& MNT_AUTOMOUNTED
) == 0) {
6139 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6140 * the mount may become "not responding". It's desirable to be able
6141 * to unmount these dead mounts, but only if there is no risk of
6142 * losing data or crashing applications. A "squishy" NFS mount is one
6143 * that can be force unmounted with little risk of harm.
6145 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6146 * in a squishy state iff it is allowed to be squishy and there are no
6147 * dirty pages and there are no mmapped files and there are no files
6148 * open for write. Mounts are allowed to be squishy is controlled by
6149 * the settings of the nfs_squishy_flags and its mobility state. These
6150 * flags can be set by sysctls.
6152 * If nfs_is_squishy determines that we are in a squishy state we will
6153 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6154 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6155 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6156 * update the current dead timeout with the mount's nm_deadtimeout
6157 * value set at mount time.
6159 * Assumes that nm_lock is held.
6161 * Note this routine is racey, but its effects on setting the
6162 * dead timeout only have effects when we're in trouble and are likely
6163 * to stay that way. Since by default its only for automounted
6164 * volumes on mobile machines; this is a reasonable trade off between
6165 * data integrity and user experience. It can be disabled or set via
6170 nfs_is_squishy(struct nfsmount
*nmp
)
6172 mount_t mp
= nmp
->nm_mountp
;
6174 int timeo
= (nfs_squishy_flags
& NFS_SQUISH_QUICK
) ? NFS_SQUISHY_QUICKTIMEOUT
: NFS_SQUISHY_DEADTIMEOUT
;
6176 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
6177 vfs_statfs(mp
)->f_mntfromname
, nmp
->nm_curdeadtimeout
, nfs_is_mobile
);
6179 if (!nfs_can_squish(nmp
)) {
6183 timeo
= (nmp
->nm_deadtimeout
> timeo
) ? max(nmp
->nm_deadtimeout
/ 8, timeo
) : timeo
;
6184 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp
->nm_writers
, nmp
->nm_mappers
, timeo
);
6186 if (nmp
->nm_writers
== 0 && nmp
->nm_mappers
== 0) {
6187 uint64_t flags
= mp
? vfs_flags(mp
) : 0;
6191 * Walk the nfs nodes and check for dirty buffers it we're not
6192 * RDONLY and we've not already been declared as squishy since
6193 * this can be a bit expensive.
6195 if (!(flags
& MNT_RDONLY
) && !(nmp
->nm_state
& NFSSTA_SQUISHY
)) {
6196 squishy
= !nfs_mount_is_dirty(mp
);
6202 nmp
->nm_state
|= NFSSTA_SQUISHY
;
6204 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
6207 nmp
->nm_curdeadtimeout
= squishy
? timeo
: nmp
->nm_deadtimeout
;
6209 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp
->nm_curdeadtimeout
);
6215 * On a send operation, if we can't reach the server and we've got only one server to talk to
6216 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
6217 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6220 nfs_is_dead(int error
, struct nfsmount
*nmp
)
6224 lck_mtx_lock(&nmp
->nm_lock
);
6225 if (nmp
->nm_state
& NFSSTA_DEAD
) {
6226 lck_mtx_unlock(&nmp
->nm_lock
);
6230 if ((error
!= ENETUNREACH
&& error
!= EHOSTUNREACH
&& error
!= EADDRNOTAVAIL
) ||
6231 !(nmp
->nm_locations
.nl_numlocs
== 1 && nmp
->nm_locations
.nl_locations
[0]->nl_servcount
== 1)) {
6232 lck_mtx_unlock(&nmp
->nm_lock
);
6236 if ((nfs_squishy_flags
& NFS_SQUISH_QUICK
) && nfs_is_squishy(nmp
)) {
6237 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
6238 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
6239 lck_mtx_unlock(&nmp
->nm_lock
);
6240 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
6241 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
6244 lck_mtx_unlock(&nmp
->nm_lock
);
6249 * If we've experienced timeouts and we're not really a
6250 * classic hard mount, then just return cached data to
6251 * the caller instead of likely hanging on an RPC.
6254 nfs_use_cache(struct nfsmount
*nmp
)
6257 *%%% We always let mobile users goto the cache,
6258 * perhaps we should not even require them to have
6261 int cache_ok
= (nfs_is_mobile
|| NMFLAG(nmp
, SOFT
) ||
6262 nfs_can_squish(nmp
) || nmp
->nm_deadtimeout
);
6264 int timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6267 * So if we have a timeout and we're not really a hard hard-mount,
6268 * return 1 to not get things out of the cache.
6271 return (nmp
->nm_state
& timeoutmask
) && cache_ok
;
6275 * Log a message that nfs or lockd server is unresponsive. Check if we
6276 * can be squished and if we can, or that our dead timeout has
6277 * expired, and we're not holding state, set our mount as dead, remove
6278 * our mount state and ask to be unmounted. If we are holding state
6279 * we're being called from the nfs_request_timer and will soon detect
6280 * that we need to unmount.
6283 nfs_down(struct nfsmount
*nmp
, thread_t thd
, int error
, int flags
, const char *msg
, int holding_state
)
6285 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
6286 uint32_t do_vfs_signal
= 0;
6289 if (nfs_mount_gone(nmp
)) {
6293 lck_mtx_lock(&nmp
->nm_lock
);
6295 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6296 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6297 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6299 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6301 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6302 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6304 if ((flags
& NFSSTA_TIMEO
) && !(nmp
->nm_state
& NFSSTA_TIMEO
)) {
6305 nmp
->nm_state
|= NFSSTA_TIMEO
;
6307 if ((flags
& NFSSTA_LOCKTIMEO
) && !(nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6308 nmp
->nm_state
|= NFSSTA_LOCKTIMEO
;
6310 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && !(nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6311 nmp
->nm_state
|= NFSSTA_JUKEBOXTIMEO
;
6314 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6316 nfs_is_squishy(nmp
);
6318 if (unresponsive
&& (nmp
->nm_curdeadtimeout
> 0)) {
6320 if (!wasunresponsive
) {
6321 nmp
->nm_deadto_start
= now
.tv_sec
;
6322 nfs_mount_sock_thread_wake(nmp
);
6323 } else if ((now
.tv_sec
- nmp
->nm_deadto_start
) > nmp
->nm_curdeadtimeout
&& !holding_state
) {
6324 if (!(nmp
->nm_state
& NFSSTA_DEAD
)) {
6325 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
6326 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
6328 do_vfs_signal
= VQ_DEAD
;
6331 lck_mtx_unlock(&nmp
->nm_lock
);
6333 if (do_vfs_signal
== VQ_DEAD
&& !(nmp
->nm_state
& NFSSTA_DEAD
)) {
6334 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
6335 } else if (softnobrowse
|| wasunresponsive
|| !unresponsive
) {
6338 do_vfs_signal
= VQ_NOTRESP
;
6340 if (do_vfs_signal
) {
6341 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, do_vfs_signal
, 0);
6344 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, error
);
6348 nfs_up(struct nfsmount
*nmp
, thread_t thd
, int flags
, const char *msg
)
6350 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
6353 if (nfs_mount_gone(nmp
)) {
6358 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, 0);
6361 lck_mtx_lock(&nmp
->nm_lock
);
6363 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6364 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6365 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6367 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6369 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6370 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6372 if ((flags
& NFSSTA_TIMEO
) && (nmp
->nm_state
& NFSSTA_TIMEO
)) {
6373 nmp
->nm_state
&= ~NFSSTA_TIMEO
;
6375 if ((flags
& NFSSTA_LOCKTIMEO
) && (nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6376 nmp
->nm_state
&= ~NFSSTA_LOCKTIMEO
;
6378 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && (nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6379 nmp
->nm_state
&= ~NFSSTA_JUKEBOXTIMEO
;
6382 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6384 nmp
->nm_deadto_start
= 0;
6385 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
6386 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
6387 lck_mtx_unlock(&nmp
->nm_lock
);
6392 do_vfs_signal
= (wasunresponsive
&& !unresponsive
);
6394 if (do_vfs_signal
) {
6395 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_NOTRESP
, 1);
6400 #endif /* CONFIG_NFS_CLIENT */
6402 #if CONFIG_NFS_SERVER
6405 * Generate the rpc reply header
6406 * siz arg. is used to decide if adding a cluster is worthwhile
6410 struct nfsrv_descript
*nd
,
6411 __unused
struct nfsrv_sock
*slp
,
6412 struct nfsm_chain
*nmrepp
,
6417 struct nfsm_chain nmrep
;
6420 err
= nd
->nd_repstat
;
6421 if (err
&& (nd
->nd_vers
== NFS_VER2
)) {
6426 * If this is a big reply, use a cluster else
6427 * try and leave leading space for the lower level headers.
6429 siz
+= RPC_REPLYSIZ
;
6430 if (siz
>= nfs_mbuf_minclsize
) {
6431 error
= mbuf_getpacket(MBUF_WAITOK
, &mrep
);
6433 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mrep
);
6436 /* unable to allocate packet */
6437 /* XXX should we keep statistics for these errors? */
6440 if (siz
< nfs_mbuf_minclsize
) {
6441 /* leave space for lower level headers */
6442 tl
= mbuf_data(mrep
);
6443 tl
+= 80 / sizeof(*tl
); /* XXX max_hdr? XXX */
6444 mbuf_setdata(mrep
, tl
, 6 * NFSX_UNSIGNED
);
6446 nfsm_chain_init(&nmrep
, mrep
);
6447 nfsm_chain_add_32(error
, &nmrep
, nd
->nd_retxid
);
6448 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
6449 if (err
== ERPCMISMATCH
|| (err
& NFSERR_AUTHERR
)) {
6450 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
6451 if (err
& NFSERR_AUTHERR
) {
6452 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
6453 nfsm_chain_add_32(error
, &nmrep
, (err
& ~NFSERR_AUTHERR
));
6455 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
6456 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6457 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6461 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
6462 if (nd
->nd_gss_context
!= NULL
) {
6463 /* RPCSEC_GSS verifier */
6464 error
= nfs_gss_svc_verf_put(nd
, &nmrep
);
6466 nfsm_chain_add_32(error
, &nmrep
, RPC_SYSTEM_ERR
);
6470 /* RPCAUTH_NULL verifier */
6471 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
6472 nfsm_chain_add_32(error
, &nmrep
, 0);
6474 /* accepted status */
6477 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
6480 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
6481 /* XXX hard coded versions? */
6482 nfsm_chain_add_32(error
, &nmrep
, NFS_VER2
);
6483 nfsm_chain_add_32(error
, &nmrep
, NFS_VER3
);
6486 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
6489 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
6492 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
6493 if (nd
->nd_gss_context
!= NULL
) {
6494 error
= nfs_gss_svc_prepare_reply(nd
, &nmrep
);
6496 if (err
!= NFSERR_RETVOID
) {
6497 nfsm_chain_add_32(error
, &nmrep
,
6498 (err
? nfsrv_errmap(nd
, err
) : 0));
6505 nfsm_chain_build_done(error
, &nmrep
);
6507 /* error composing reply header */
6508 /* XXX should we keep statistics for these errors? */
6514 if ((err
!= 0) && (err
!= NFSERR_RETVOID
)) {
6515 OSAddAtomic64(1, &nfsstats
.srvrpc_errs
);
6521 * The nfs server send routine.
6523 * - return EINTR or ERESTART if interrupted by a signal
6524 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6525 * - do any cleanup required by recoverable socket errors (???)
6528 nfsrv_send(struct nfsrv_sock
*slp
, mbuf_t nam
, mbuf_t top
)
6531 socket_t so
= slp
->ns_so
;
6532 struct sockaddr
*sendnam
;
6535 bzero(&msg
, sizeof(msg
));
6536 if (nam
&& !sock_isconnected(so
) && (slp
->ns_sotype
!= SOCK_STREAM
)) {
6537 if ((sendnam
= mbuf_data(nam
))) {
6538 msg
.msg_name
= (caddr_t
)sendnam
;
6539 msg
.msg_namelen
= sendnam
->sa_len
;
6542 if (NFS_IS_DBG(NFS_FAC_SRV
, 15)) {
6543 nfs_dump_mbuf(__func__
, __LINE__
, "nfsrv_send\n", top
);
6545 error
= sock_sendmbuf(so
, &msg
, top
, 0, NULL
);
6549 log(LOG_INFO
, "nfsd send error %d\n", error
);
6551 if ((error
== EWOULDBLOCK
) && (slp
->ns_sotype
== SOCK_STREAM
)) {
6552 error
= EPIPE
; /* zap TCP sockets if they time out on send */
6554 /* Handle any recoverable (soft) socket errors here. (???) */
6555 if (error
!= EINTR
&& error
!= ERESTART
&& error
!= EIO
&&
6556 error
!= EWOULDBLOCK
&& error
!= EPIPE
) {
6564 * Socket upcall routine for the nfsd sockets.
6565 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6566 * Essentially do as much as possible non-blocking, else punt and it will
6567 * be called with MBUF_WAITOK from an nfsd.
6570 nfsrv_rcv(socket_t so
, void *arg
, int waitflag
)
6572 struct nfsrv_sock
*slp
= arg
;
6574 if (!nfsd_thread_count
|| !(slp
->ns_flag
& SLP_VALID
)) {
6578 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
6579 nfsrv_rcv_locked(so
, slp
, waitflag
);
6580 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6583 nfsrv_rcv_locked(socket_t so
, struct nfsrv_sock
*slp
, int waitflag
)
6585 mbuf_t m
, mp
, mhck
, m2
;
6586 int ns_flag
= 0, error
;
6590 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
6591 if (waitflag
== MBUF_DONTWAIT
) {
6592 lck_rw_done(&slp
->ns_rwlock
);
6599 * Define this to test for nfsds handling this under heavy load.
6601 if (waitflag
== MBUF_DONTWAIT
) {
6602 ns_flag
= SLP_NEEDQ
;
6606 if (slp
->ns_sotype
== SOCK_STREAM
) {
6608 * If there are already records on the queue, defer soreceive()
6609 * to an(other) nfsd so that there is feedback to the TCP layer that
6610 * the nfs servers are heavily loaded.
6613 ns_flag
= SLP_NEEDQ
;
6620 bytes_read
= 1000000000;
6621 error
= sock_receivembuf(so
, NULL
, &mp
, MSG_DONTWAIT
, &bytes_read
);
6622 if (error
|| mp
== NULL
) {
6623 if (error
== EWOULDBLOCK
) {
6624 ns_flag
= (waitflag
== MBUF_DONTWAIT
) ? SLP_NEEDQ
: 0;
6626 ns_flag
= SLP_DISCONN
;
6631 if (slp
->ns_rawend
) {
6632 if ((error
= mbuf_setnext(slp
->ns_rawend
, m
))) {
6633 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error
);
6635 slp
->ns_cc
+= bytes_read
;
6638 slp
->ns_cc
= bytes_read
;
6640 while ((m2
= mbuf_next(m
))) {
6646 * Now try and parse record(s) out of the raw stream data.
6648 error
= nfsrv_getstream(slp
, waitflag
);
6650 if (error
== EPERM
) {
6651 ns_flag
= SLP_DISCONN
;
6653 ns_flag
= SLP_NEEDQ
;
6657 struct sockaddr_storage nam
;
6659 if (slp
->ns_reccnt
>= nfsrv_sock_max_rec_queue_length
) {
6660 /* already have max # RPC records queued on this socket */
6661 ns_flag
= SLP_NEEDQ
;
6665 bzero(&msg
, sizeof(msg
));
6666 msg
.msg_name
= (caddr_t
)&nam
;
6667 msg
.msg_namelen
= sizeof(nam
);
6670 bytes_read
= 1000000000;
6671 error
= sock_receivembuf(so
, &msg
, &mp
, MSG_DONTWAIT
| MSG_NEEDSA
, &bytes_read
);
6673 if (msg
.msg_name
&& (mbuf_get(MBUF_WAITOK
, MBUF_TYPE_SONAME
, &mhck
) == 0)) {
6674 mbuf_setlen(mhck
, nam
.ss_len
);
6675 bcopy(&nam
, mbuf_data(mhck
), nam
.ss_len
);
6677 if (mbuf_setnext(m
, mp
)) {
6678 /* trouble... just drop it */
6679 printf("nfsrv_rcv: mbuf_setnext failed\n");
6686 if (slp
->ns_recend
) {
6687 mbuf_setnextpkt(slp
->ns_recend
, m
);
6690 slp
->ns_flag
|= SLP_DOREC
;
6693 mbuf_setnextpkt(m
, NULL
);
6700 * Now try and process the request records, non-blocking.
6704 slp
->ns_flag
|= ns_flag
;
6706 if (waitflag
== MBUF_DONTWAIT
) {
6707 int wake
= (slp
->ns_flag
& SLP_WORKTODO
);
6708 lck_rw_done(&slp
->ns_rwlock
);
6709 if (wake
&& nfsd_thread_count
) {
6710 lck_mtx_lock(&nfsd_mutex
);
6711 nfsrv_wakenfsd(slp
);
6712 lck_mtx_unlock(&nfsd_mutex
);
6718 * Try and extract an RPC request from the mbuf data list received on a
6719 * stream socket. The "waitflag" argument indicates whether or not it
6723 nfsrv_getstream(struct nfsrv_sock
*slp
, int waitflag
)
6726 char *cp1
, *cp2
, *mdata
;
6729 mbuf_t om
, m2
, recm
;
6732 if (slp
->ns_flag
& SLP_GETSTREAM
) {
6733 panic("nfs getstream");
6735 slp
->ns_flag
|= SLP_GETSTREAM
;
6737 if (slp
->ns_reclen
== 0) {
6738 if (slp
->ns_cc
< NFSX_UNSIGNED
) {
6739 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6743 mdata
= mbuf_data(m
);
6745 if (mlen
>= NFSX_UNSIGNED
) {
6746 bcopy(mdata
, (caddr_t
)&recmark
, NFSX_UNSIGNED
);
6747 mdata
+= NFSX_UNSIGNED
;
6748 mlen
-= NFSX_UNSIGNED
;
6749 mbuf_setdata(m
, mdata
, mlen
);
6751 cp1
= (caddr_t
)&recmark
;
6753 while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
6761 mbuf_setdata(m
, cp2
, mlen
);
6764 slp
->ns_cc
-= NFSX_UNSIGNED
;
6765 recmark
= ntohl(recmark
);
6766 slp
->ns_reclen
= recmark
& ~0x80000000;
6767 if (recmark
& 0x80000000) {
6768 slp
->ns_flag
|= SLP_LASTFRAG
;
6770 slp
->ns_flag
&= ~SLP_LASTFRAG
;
6772 if (slp
->ns_reclen
<= 0 || slp
->ns_reclen
> NFS_MAXPACKET
) {
6773 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6779 * Now get the record part.
6781 * Note that slp->ns_reclen may be 0. Linux sometimes
6782 * generates 0-length RPCs
6785 if (slp
->ns_cc
== slp
->ns_reclen
) {
6787 slp
->ns_raw
= slp
->ns_rawend
= NULL
;
6788 slp
->ns_cc
= slp
->ns_reclen
= 0;
6789 } else if (slp
->ns_cc
> slp
->ns_reclen
) {
6793 mdata
= mbuf_data(m
);
6795 while (len
< slp
->ns_reclen
) {
6796 if ((len
+ mlen
) > slp
->ns_reclen
) {
6797 if (mbuf_copym(m
, 0, slp
->ns_reclen
- len
, waitflag
, &m2
)) {
6798 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6802 if (mbuf_setnext(om
, m2
)) {
6803 /* trouble... just drop it */
6804 printf("nfsrv_getstream: mbuf_setnext failed\n");
6806 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6813 mdata
+= slp
->ns_reclen
- len
;
6814 mlen
-= slp
->ns_reclen
- len
;
6815 mbuf_setdata(m
, mdata
, mlen
);
6816 len
= slp
->ns_reclen
;
6817 } else if ((len
+ mlen
) == slp
->ns_reclen
) {
6822 if (mbuf_setnext(om
, NULL
)) {
6823 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
6824 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6828 mdata
= mbuf_data(m
);
6834 mdata
= mbuf_data(m
);
6841 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6846 * Accumulate the fragments into a record.
6848 if (slp
->ns_frag
== NULL
) {
6849 slp
->ns_frag
= recm
;
6852 while ((m2
= mbuf_next(m
))) {
6855 if ((error
= mbuf_setnext(m
, recm
))) {
6856 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error
);
6859 if (slp
->ns_flag
& SLP_LASTFRAG
) {
6860 if (slp
->ns_recend
) {
6861 mbuf_setnextpkt(slp
->ns_recend
, slp
->ns_frag
);
6863 slp
->ns_rec
= slp
->ns_frag
;
6864 slp
->ns_flag
|= SLP_DOREC
;
6866 slp
->ns_recend
= slp
->ns_frag
;
6867 slp
->ns_frag
= NULL
;
6873 * Parse an RPC header.
6877 struct nfsrv_sock
*slp
,
6879 struct nfsrv_descript
**ndp
)
6883 struct nfsrv_descript
*nd
;
6887 if (!(slp
->ns_flag
& (SLP_VALID
| SLP_DOREC
)) || (slp
->ns_rec
== NULL
)) {
6890 nd
= zalloc(nfsrv_descript_zone
);
6892 slp
->ns_rec
= mbuf_nextpkt(m
);
6894 mbuf_setnextpkt(m
, NULL
);
6896 slp
->ns_flag
&= ~SLP_DOREC
;
6897 slp
->ns_recend
= NULL
;
6900 if (mbuf_type(m
) == MBUF_TYPE_SONAME
) {
6903 if ((error
= mbuf_setnext(nam
, NULL
))) {
6904 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error
);
6910 nfsm_chain_dissect_init(error
, &nd
->nd_nmreq
, m
);
6912 error
= nfsrv_getreq(nd
);
6918 if (nd
->nd_gss_context
) {
6919 nfs_gss_svc_ctx_deref(nd
->nd_gss_context
);
6921 NFS_ZFREE(nfsrv_descript_zone
, nd
);
6931 * Parse an RPC request
6933 * - fill in the cred struct.
6936 nfsrv_getreq(struct nfsrv_descript
*nd
)
6938 struct nfsm_chain
*nmreq
;
6940 u_int32_t nfsvers
, auth_type
;
6948 nd
->nd_gss_context
= NULL
;
6949 nd
->nd_gss_seqnum
= 0;
6950 nd
->nd_gss_mb
= NULL
;
6952 user_id
= group_id
= -2;
6953 val
= auth_type
= len
= 0;
6955 nmreq
= &nd
->nd_nmreq
;
6956 nfsm_chain_get_32(error
, nmreq
, nd
->nd_retxid
); // XID
6957 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Call
6958 if (!error
&& (val
!= RPC_CALL
)) {
6963 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Version
6965 if (val
!= RPC_VER2
) {
6966 nd
->nd_repstat
= ERPCMISMATCH
;
6967 nd
->nd_procnum
= NFSPROC_NOOP
;
6970 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Program Number
6972 if (val
!= NFS_PROG
) {
6973 nd
->nd_repstat
= EPROGUNAVAIL
;
6974 nd
->nd_procnum
= NFSPROC_NOOP
;
6977 nfsm_chain_get_32(error
, nmreq
, nfsvers
);// NFS Version Number
6979 if ((nfsvers
< NFS_VER2
) || (nfsvers
> NFS_VER3
)) {
6980 nd
->nd_repstat
= EPROGMISMATCH
;
6981 nd
->nd_procnum
= NFSPROC_NOOP
;
6984 nd
->nd_vers
= nfsvers
;
6985 nfsm_chain_get_32(error
, nmreq
, nd
->nd_procnum
);// NFS Procedure Number
6987 if ((nd
->nd_procnum
>= NFS_NPROCS
) ||
6988 ((nd
->nd_vers
== NFS_VER2
) && (nd
->nd_procnum
> NFSV2PROC_STATFS
))) {
6989 nd
->nd_repstat
= EPROCUNAVAIL
;
6990 nd
->nd_procnum
= NFSPROC_NOOP
;
6993 if (nfsvers
!= NFS_VER3
) {
6994 nd
->nd_procnum
= nfsv3_procid
[nd
->nd_procnum
];
6996 nfsm_chain_get_32(error
, nmreq
, auth_type
); // Auth Flavor
6997 nfsm_chain_get_32(error
, nmreq
, len
); // Auth Length
6998 if (!error
&& (len
< 0 || len
> RPCAUTH_MAXSIZ
)) {
7003 /* Handle authentication */
7004 if (auth_type
== RPCAUTH_SYS
) {
7005 struct posix_cred temp_pcred
;
7006 if (nd
->nd_procnum
== NFSPROC_NULL
) {
7009 nd
->nd_sec
= RPCAUTH_SYS
;
7010 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // skip stamp
7011 nfsm_chain_get_32(error
, nmreq
, len
); // hostname length
7012 if (len
< 0 || len
> NFS_MAXNAMLEN
) {
7015 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
)); // skip hostname
7018 /* create a temporary credential using the bits from the wire */
7019 bzero(&temp_pcred
, sizeof(temp_pcred
));
7020 nfsm_chain_get_32(error
, nmreq
, user_id
);
7021 nfsm_chain_get_32(error
, nmreq
, group_id
);
7022 temp_pcred
.cr_groups
[0] = group_id
;
7023 nfsm_chain_get_32(error
, nmreq
, len
); // extra GID count
7024 if ((len
< 0) || (len
> RPCAUTH_UNIXGIDS
)) {
7028 for (i
= 1; i
<= len
; i
++) {
7030 nfsm_chain_get_32(error
, nmreq
, temp_pcred
.cr_groups
[i
]);
7032 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
);
7036 ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (short)(len
+ 1);
7038 nfsrv_group_sort(&temp_pcred
.cr_groups
[0], ngroups
);
7040 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
7041 nfsm_chain_get_32(error
, nmreq
, len
); // verifier length
7042 if (len
< 0 || len
> RPCAUTH_MAXSIZ
) {
7046 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
));
7049 /* request creation of a real credential */
7050 temp_pcred
.cr_uid
= user_id
;
7051 temp_pcred
.cr_ngroups
= ngroups
;
7052 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
7053 if (nd
->nd_cr
== NULL
) {
7054 nd
->nd_repstat
= ENOMEM
;
7055 nd
->nd_procnum
= NFSPROC_NOOP
;
7058 } else if (auth_type
== RPCSEC_GSS
) {
7059 error
= nfs_gss_svc_cred_get(nd
, nmreq
);
7061 if (error
== EINVAL
) {
7062 goto nfsmout
; // drop the request
7064 nd
->nd_repstat
= error
;
7065 nd
->nd_procnum
= NFSPROC_NOOP
;
7069 if (nd
->nd_procnum
== NFSPROC_NULL
) { // assume it's AUTH_NONE
7072 nd
->nd_repstat
= (NFSERR_AUTHERR
| AUTH_REJECTCRED
);
7073 nd
->nd_procnum
= NFSPROC_NOOP
;
7078 if (IS_VALID_CRED(nd
->nd_cr
)) {
7079 kauth_cred_unref(&nd
->nd_cr
);
7081 nfsm_chain_cleanup(nmreq
);
7086 * Search for a sleeping nfsd and wake it up.
7087 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7088 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7089 * Note: Must be called with nfsd_mutex held.
7092 nfsrv_wakenfsd(struct nfsrv_sock
*slp
)
7096 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
7100 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
7101 /* if there's work to do on this socket, make sure it's queued up */
7102 if ((slp
->ns_flag
& SLP_WORKTODO
) && !(slp
->ns_flag
& SLP_QUEUED
)) {
7103 TAILQ_INSERT_TAIL(&nfsrv_sockwait
, slp
, ns_svcq
);
7104 slp
->ns_flag
|= SLP_WAITQ
;
7106 lck_rw_done(&slp
->ns_rwlock
);
7108 /* wake up a waiting nfsd, if possible */
7109 nd
= TAILQ_FIRST(&nfsd_queue
);
7114 TAILQ_REMOVE(&nfsd_queue
, nd
, nfsd_queue
);
7115 nd
->nfsd_flag
&= ~NFSD_WAITING
;
7119 #endif /* CONFIG_NFS_SERVER */
7121 #endif /* CONFIG_NFS */