2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
69 * Socket operations for use by nfs
72 #include <sys/param.h>
73 #include <sys/systm.h>
75 #include <sys/signalvar.h>
76 #include <sys/kauth.h>
77 #include <sys/mount_internal.h>
78 #include <sys/kernel.h>
79 #include <sys/kpi_mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/vnode.h>
82 #include <sys/domain.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/syslog.h>
86 #include <sys/tprintf.h>
87 #include <libkern/OSAtomic.h>
90 #include <kern/clock.h>
91 #include <kern/task.h>
92 #include <kern/thread.h>
93 #include <kern/thread_call.h>
97 #include <netinet/in.h>
98 #include <netinet/tcp.h>
100 #include <nfs/rpcv2.h>
101 #include <nfs/krpc.h>
102 #include <nfs/nfsproto.h>
104 #include <nfs/xdr_subs.h>
105 #include <nfs/nfsm_subs.h>
106 #include <nfs/nfs_gss.h>
107 #include <nfs/nfsmount.h>
108 #include <nfs/nfsnode.h>
110 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
113 boolean_t
current_thread_aborted(void);
114 kern_return_t
thread_terminate(thread_t
);
118 int nfsrv_sock_max_rec_queue_length
= 128; /* max # RPC records queued on (UDP) socket */
120 int nfsrv_getstream(struct nfsrv_sock
*,int);
121 int nfsrv_getreq(struct nfsrv_descript
*);
122 extern int nfsv3_procid
[NFS_NPROCS
];
123 #endif /* NFSSERVER */
126 * compare two sockaddr structures
129 nfs_sockaddr_cmp(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
135 if (sa1
->sa_family
!= sa2
->sa_family
)
136 return ((sa1
->sa_family
< sa2
->sa_family
) ? -1 : 1);
137 if (sa1
->sa_len
!= sa2
->sa_len
)
138 return ((sa1
->sa_len
< sa2
->sa_len
) ? -1 : 1);
139 if (sa1
->sa_family
== AF_INET
)
140 return (bcmp(&((struct sockaddr_in
*)sa1
)->sin_addr
,
141 &((struct sockaddr_in
*)sa2
)->sin_addr
, sizeof(((struct sockaddr_in
*)sa1
)->sin_addr
)));
142 if (sa1
->sa_family
== AF_INET6
)
143 return (bcmp(&((struct sockaddr_in6
*)sa1
)->sin6_addr
,
144 &((struct sockaddr_in6
*)sa2
)->sin6_addr
, sizeof(((struct sockaddr_in6
*)sa1
)->sin6_addr
)));
150 int nfs_connect_search_new_socket(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
151 int nfs_connect_search_socket_connect(struct nfsmount
*, struct nfs_socket
*, int);
152 int nfs_connect_search_ping(struct nfsmount
*, struct nfs_socket
*, struct timeval
*);
153 void nfs_connect_search_socket_found(struct nfsmount
*, struct nfs_socket_search
*, struct nfs_socket
*);
154 void nfs_connect_search_socket_reap(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
155 int nfs_connect_search_check(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
156 int nfs_reconnect(struct nfsmount
*);
157 int nfs_connect_setup(struct nfsmount
*);
158 void nfs_mount_sock_thread(void *, wait_result_t
);
159 void nfs_udp_rcv(socket_t
, void*, int);
160 void nfs_tcp_rcv(socket_t
, void*, int);
161 void nfs_sock_poke(struct nfsmount
*);
162 void nfs_request_match_reply(struct nfsmount
*, mbuf_t
);
163 void nfs_reqdequeue(struct nfsreq
*);
164 void nfs_reqbusy(struct nfsreq
*);
165 struct nfsreq
*nfs_reqnext(struct nfsreq
*);
166 int nfs_wait_reply(struct nfsreq
*);
167 void nfs_softterm(struct nfsreq
*);
168 int nfs_can_squish(struct nfsmount
*);
169 int nfs_is_squishy(struct nfsmount
*);
170 int nfs_is_dead(int, struct nfsmount
*);
173 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
174 * Use the mean and mean deviation of rtt for the appropriate type of rpc
175 * for the frequent rpcs and a default for the others.
176 * The justification for doing "other" this way is that these rpcs
177 * happen so infrequently that timer est. would probably be stale.
178 * Also, since many of these rpcs are
179 * non-idempotent, a conservative timeout is desired.
180 * getattr, lookup - A+2D
184 #define NFS_RTO(n, t) \
185 ((t) == 0 ? (n)->nm_timeo : \
187 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
188 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
189 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
190 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
193 * Defines which timer to use for the procnum.
200 static int proct
[NFS_NPROCS
] = {
201 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0
205 * There is a congestion window for outstanding rpcs maintained per mount
206 * point. The cwnd size is adjusted in roughly the way that:
207 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
208 * SIGCOMM '88". ACM, August 1988.
209 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
210 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
211 * of rpcs is in progress.
212 * (The sent count and cwnd are scaled for integer arith.)
213 * Variants of "slow start" were tried and were found to be too much of a
214 * performance hit (ave. rtt 3 times larger),
215 * I suspect due to the large rtt that nfs rpcs have.
217 #define NFS_CWNDSCALE 256
218 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
219 static int nfs_backoff
[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
222 * Increment location index to next address/server/location.
225 nfs_location_next(struct nfs_fs_locations
*nlp
, struct nfs_location_index
*nlip
)
227 uint8_t loc
= nlip
->nli_loc
;
228 uint8_t serv
= nlip
->nli_serv
;
229 uint8_t addr
= nlip
->nli_addr
;
231 /* move to next address */
233 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
234 /* no more addresses on current server, go to first address of next server */
238 if (serv
>= nlp
->nl_locations
[loc
]->nl_servcount
) {
239 /* no more servers on current location, go to first server of next location */
242 if (loc
>= nlp
->nl_numlocs
)
243 loc
= 0; /* after last location, wrap back around to first location */
247 * It's possible for this next server to not have any addresses.
248 * Check for that here and go to the next server.
249 * But bail out if we've managed to come back around to the original
250 * location that was passed in. (That would mean no servers had any
251 * addresses. And we don't want to spin here forever.)
253 if ((loc
== nlip
->nli_loc
) && (serv
== nlip
->nli_serv
) && (addr
== nlip
->nli_addr
))
255 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
)
259 nlip
->nli_serv
= serv
;
260 nlip
->nli_addr
= addr
;
264 * Compare two location indices.
267 nfs_location_index_cmp(struct nfs_location_index
*nlip1
, struct nfs_location_index
*nlip2
)
269 if (nlip1
->nli_loc
!= nlip2
->nli_loc
)
270 return (nlip1
->nli_loc
- nlip2
->nli_loc
);
271 if (nlip1
->nli_serv
!= nlip2
->nli_serv
)
272 return (nlip1
->nli_serv
- nlip2
->nli_serv
);
273 return (nlip1
->nli_addr
- nlip2
->nli_addr
);
277 * Get the mntfromname (or path portion only) for a given location.
280 nfs_location_mntfromname(struct nfs_fs_locations
*locs
, struct nfs_location_index idx
, char *s
, int size
, int pathonly
)
282 struct nfs_fs_location
*fsl
= locs
->nl_locations
[idx
.nli_loc
];
288 cnt
= snprintf(p
, size
, "%s:", fsl
->nl_servers
[idx
.nli_serv
]->ns_name
);
292 if (fsl
->nl_path
.np_compcount
== 0) {
293 /* mounting root export on server */
300 /* append each server path component */
301 for (i
=0; (size
> 0) && (i
< (int)fsl
->nl_path
.np_compcount
); i
++) {
302 cnt
= snprintf(p
, size
, "/%s", fsl
->nl_path
.np_components
[i
]);
309 * NFS client connect socket upcall.
310 * (Used only during socket connect/search.)
313 nfs_connect_upcall(socket_t so
, void *arg
, __unused
int waitflag
)
315 struct nfs_socket
*nso
= arg
;
318 int error
= 0, recv
= 1;
320 if (nso
->nso_flags
& NSO_CONNECTING
) {
321 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting\n", nso
);
322 wakeup(nso
->nso_wake
);
326 lck_mtx_lock(&nso
->nso_lock
);
327 if ((nso
->nso_flags
& (NSO_UPCALL
|NSO_DISCONNECTING
|NSO_DEAD
)) || !(nso
->nso_flags
& NSO_PINGING
)) {
328 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso
);
329 lck_mtx_unlock(&nso
->nso_lock
);
332 NFS_SOCK_DBG("nfs connect - socket %p upcall\n", nso
);
333 nso
->nso_flags
|= NSO_UPCALL
;
335 /* loop while we make error-free progress */
336 while (!error
&& recv
) {
337 /* make sure we're still interested in this socket */
338 if (nso
->nso_flags
& (NSO_DISCONNECTING
|NSO_DEAD
))
340 lck_mtx_unlock(&nso
->nso_lock
);
342 if (nso
->nso_sotype
== SOCK_STREAM
) {
343 error
= nfs_rpc_record_read(so
, &nso
->nso_rrs
, MSG_DONTWAIT
, &recv
, &m
);
346 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
349 lck_mtx_lock(&nso
->nso_lock
);
351 /* match response with request */
352 struct nfsm_chain nmrep
;
353 uint32_t reply
= 0, rxid
= 0, verf_type
, verf_len
;
354 uint32_t reply_status
, rejected_status
, accepted_status
;
356 nfsm_chain_dissect_init(error
, &nmrep
, m
);
357 nfsm_chain_get_32(error
, &nmrep
, rxid
);
358 nfsm_chain_get_32(error
, &nmrep
, reply
);
359 if (!error
&& ((reply
!= RPC_REPLY
) || (rxid
!= nso
->nso_pingxid
)))
361 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
362 if (!error
&& (reply_status
== RPC_MSGDENIED
)) {
363 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
365 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
367 nfsm_chain_get_32(error
, &nmrep
, verf_type
); /* verifier flavor */
368 nfsm_chain_get_32(error
, &nmrep
, verf_len
); /* verifier length */
371 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
372 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
374 if ((accepted_status
== RPC_PROGMISMATCH
) && !nso
->nso_version
) {
375 uint32_t minvers
, maxvers
;
376 nfsm_chain_get_32(error
, &nmrep
, minvers
);
377 nfsm_chain_get_32(error
, &nmrep
, maxvers
);
379 if (nso
->nso_protocol
== PMAPPROG
) {
380 if ((minvers
> RPCBVERS4
) || (maxvers
< PMAPVERS
))
381 error
= EPROGMISMATCH
;
382 else if ((nso
->nso_saddr
->sa_family
== AF_INET
) &&
383 (PMAPVERS
>= minvers
) && (PMAPVERS
<= maxvers
))
384 nso
->nso_version
= PMAPVERS
;
385 else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
386 if ((RPCBVERS4
>= minvers
) && (RPCBVERS4
<= maxvers
))
387 nso
->nso_version
= RPCBVERS4
;
388 else if ((RPCBVERS3
>= minvers
) && (RPCBVERS3
<= maxvers
))
389 nso
->nso_version
= RPCBVERS3
;
391 } else if (nso
->nso_protocol
== NFS_PROG
) {
395 * N.B. Both portmapper and rpcbind V3 are happy to return
396 * addresses for other versions than the one you ask (getport or
397 * getaddr) and thus we may have fallen to this code path. So if
398 * we get a version that we support, use highest supported
399 * version. This assumes that the server supports all versions
400 * between minvers and maxvers. Note for IPv6 we will try and
401 * use rpcbind V4 which has getversaddr and we should not get
402 * here if that was successful.
404 for (vers
= nso
->nso_nfs_max_vers
; vers
>= (int)nso
->nso_nfs_min_vers
; vers
--) {
405 if (vers
>= (int)minvers
&& vers
<= (int)maxvers
)
408 nso
->nso_version
= (vers
< (int)nso
->nso_nfs_min_vers
) ? 0 : vers
;
410 if (!error
&& nso
->nso_version
)
411 accepted_status
= RPC_SUCCESS
;
414 switch (accepted_status
) {
418 case RPC_PROGUNAVAIL
:
419 error
= EPROGUNAVAIL
;
421 case RPC_PROGMISMATCH
:
422 error
= EPROGMISMATCH
;
424 case RPC_PROCUNAVAIL
:
425 error
= EPROCUNAVAIL
;
437 nso
->nso_flags
&= ~NSO_PINGING
;
439 nso
->nso_error
= error
;
440 nso
->nso_flags
|= NSO_DEAD
;
442 nso
->nso_flags
|= NSO_VERIFIED
;
445 /* wake up search thread */
446 wakeup(nso
->nso_wake
);
451 nso
->nso_flags
&= ~NSO_UPCALL
;
452 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
453 /* problems with the socket... */
454 nso
->nso_error
= error
? error
: EPIPE
;
455 nso
->nso_flags
|= NSO_DEAD
;
456 wakeup(nso
->nso_wake
);
458 if (nso
->nso_flags
& NSO_DISCONNECTING
)
459 wakeup(&nso
->nso_flags
);
460 lck_mtx_unlock(&nso
->nso_lock
);
464 * Create/initialize an nfs_socket structure.
468 struct nfsmount
*nmp
,
475 struct nfs_socket
**nsop
)
477 struct nfs_socket
*nso
;
480 #ifdef NFS_SOCKET_DEBUGGING
481 char naddr
[MAX_IPv6_STR_LEN
];
484 if (sa
->sa_family
== AF_INET
)
485 sinaddr
= &((struct sockaddr_in
*)sa
)->sin_addr
;
487 sinaddr
= &((struct sockaddr_in6
*)sa
)->sin6_addr
;
488 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
)
489 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
491 char naddr
[1] = { 0 };
496 /* Create the socket. */
497 MALLOC(nso
, struct nfs_socket
*, sizeof(struct nfs_socket
), M_TEMP
, M_WAITOK
|M_ZERO
);
499 MALLOC(nso
->nso_saddr
, struct sockaddr
*, sa
->sa_len
, M_SONAME
, M_WAITOK
|M_ZERO
);
500 if (!nso
|| !nso
->nso_saddr
) {
505 lck_mtx_init(&nso
->nso_lock
, nfs_request_grp
, LCK_ATTR_NULL
);
506 nso
->nso_sotype
= sotype
;
507 if (nso
->nso_sotype
== SOCK_STREAM
)
508 nfs_rpc_record_state_init(&nso
->nso_rrs
);
510 nso
->nso_timestamp
= now
.tv_sec
;
511 bcopy(sa
, nso
->nso_saddr
, sa
->sa_len
);
512 if (sa
->sa_family
== AF_INET
)
513 ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
514 else if (sa
->sa_family
== AF_INET6
)
515 ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
516 nso
->nso_protocol
= protocol
;
517 nso
->nso_version
= vers
;
518 nso
->nso_nfs_min_vers
= PVER2MAJOR(nmp
->nm_min_vers
);
519 nso
->nso_nfs_max_vers
= PVER2MAJOR(nmp
->nm_max_vers
);
521 error
= sock_socket(sa
->sa_family
, nso
->nso_sotype
, 0, NULL
, NULL
, &nso
->nso_so
);
523 /* Some servers require that the client port be a reserved port number. */
524 if (!error
&& resvport
&& ((sa
->sa_family
== AF_INET
) || (sa
->sa_family
== AF_INET6
))) {
525 struct sockaddr_storage ss
;
526 int level
= (sa
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
527 int optname
= (sa
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
528 int portrange
= IP_PORTRANGE_LOW
;
530 error
= sock_setsockopt(nso
->nso_so
, level
, optname
, &portrange
, sizeof(portrange
));
531 if (!error
) { /* bind now to check for failure */
532 ss
.ss_len
= sa
->sa_len
;
533 ss
.ss_family
= sa
->sa_family
;
534 if (ss
.ss_family
== AF_INET
) {
535 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
536 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
537 } else if (ss
.ss_family
== AF_INET6
) {
538 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
539 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
544 error
= sock_bind(nso
->nso_so
, (struct sockaddr
*)&ss
);
549 NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
550 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nso
, naddr
, sotype
,
551 resvport
? "r" : "", port
, protocol
, vers
);
552 nfs_socket_destroy(nso
);
554 NFS_SOCK_DBG("nfs connect %s created socket %p %s type %d%s port %d prot %d %d\n",
555 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, naddr
,
556 sotype
, resvport
? "r" : "", port
, protocol
, vers
);
563 * Destroy an nfs_socket structure.
566 nfs_socket_destroy(struct nfs_socket
*nso
)
568 struct timespec ts
= { 4, 0 };
570 lck_mtx_lock(&nso
->nso_lock
);
571 nso
->nso_flags
|= NSO_DISCONNECTING
;
572 if (nso
->nso_flags
& NSO_UPCALL
) /* give upcall a chance to complete */
573 msleep(&nso
->nso_flags
, &nso
->nso_lock
, PZERO
-1, "nfswaitupcall", &ts
);
574 lck_mtx_unlock(&nso
->nso_lock
);
575 sock_shutdown(nso
->nso_so
, SHUT_RDWR
);
576 sock_close(nso
->nso_so
);
577 if (nso
->nso_sotype
== SOCK_STREAM
)
578 nfs_rpc_record_state_cleanup(&nso
->nso_rrs
);
579 lck_mtx_destroy(&nso
->nso_lock
, nfs_request_grp
);
581 FREE(nso
->nso_saddr
, M_SONAME
);
583 FREE(nso
->nso_saddr2
, M_SONAME
);
584 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso
);
589 * Set common socket options on an nfs_socket.
592 nfs_socket_options(struct nfsmount
*nmp
, struct nfs_socket
*nso
)
595 * Set socket send/receive timeouts
596 * - Receive timeout shouldn't matter because most receives are performed
597 * in the socket upcall non-blocking.
598 * - Send timeout should allow us to react to a blocked socket.
599 * Soft mounts will want to abort sooner.
601 struct timeval timeo
;
605 timeo
.tv_sec
= (NMFLAG(nmp
, SOFT
) || nfs_can_squish(nmp
)) ? 5 : 60;
606 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
607 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
608 if (nso
->nso_sotype
== SOCK_STREAM
) {
609 /* Assume that SOCK_STREAM always requires a connection */
610 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_KEEPALIVE
, &on
, sizeof(on
));
611 /* set nodelay for TCP */
612 sock_gettype(nso
->nso_so
, NULL
, NULL
, &proto
);
613 if (proto
== IPPROTO_TCP
)
614 sock_setsockopt(nso
->nso_so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
616 if (nso
->nso_sotype
== SOCK_DGRAM
) { /* set socket buffer sizes for UDP */
617 int reserve
= NFS_UDPSOCKBUF
;
618 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDBUF
, &reserve
, sizeof(reserve
));
619 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVBUF
, &reserve
, sizeof(reserve
));
621 /* set SO_NOADDRERR to detect network changes ASAP */
622 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
623 /* just playin' it safe with upcalls */
624 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
625 /* socket should be interruptible if the mount is */
626 if (!NMFLAG(nmp
, INTR
))
627 sock_nointerrupt(nso
->nso_so
, 1);
631 * Release resources held in an nfs_socket_search.
634 nfs_socket_search_cleanup(struct nfs_socket_search
*nss
)
636 struct nfs_socket
*nso
, *nsonext
;
638 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
639 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
641 nfs_socket_destroy(nso
);
644 nfs_socket_destroy(nss
->nss_sock
);
645 nss
->nss_sock
= NULL
;
650 * Prefer returning certain errors over others.
651 * This function returns a ranking of the given error.
654 nfs_connect_error_class(int error
)
689 * Make sure a socket search returns the best error.
692 nfs_socket_search_update_error(struct nfs_socket_search
*nss
, int error
)
694 if (nfs_connect_error_class(error
) >= nfs_connect_error_class(nss
->nss_error
))
695 nss
->nss_error
= error
;
698 /* nfs_connect_search_new_socket:
699 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
702 * nss_last is set to -1 at initialization to indicate the first time. It is set to -2 if address was found but
703 * could not be used or if a socket timed out.
706 nfs_connect_search_new_socket(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
708 struct nfs_fs_location
*fsl
;
709 struct nfs_fs_server
*fss
;
710 struct sockaddr_storage ss
;
711 struct nfs_socket
*nso
;
716 NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
717 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
->nss_addrcnt
);
720 * while there are addresses and:
721 * we have no sockets or
722 * the last address failed and did not produce a socket (nss_last < 0) or
723 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
724 * then attempt to create a socket with the current address.
726 while (nss
->nss_addrcnt
> 0 && ((nss
->nss_last
< 0) || (nss
->nss_sockcnt
== 0) ||
727 ((nss
->nss_sockcnt
< 4) && (now
->tv_sec
>= (nss
->nss_last
+ 2))))) {
728 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
730 /* Can we convert the address to a sockaddr? */
731 fsl
= nmp
->nm_locations
.nl_locations
[nss
->nss_nextloc
.nli_loc
];
732 fss
= fsl
->nl_servers
[nss
->nss_nextloc
.nli_serv
];
733 addrstr
= fss
->ns_addresses
[nss
->nss_nextloc
.nli_addr
];
734 if (!nfs_uaddr2sockaddr(addrstr
, (struct sockaddr
*)&ss
)) {
735 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
736 nss
->nss_addrcnt
-= 1;
740 /* Check that socket family is acceptable. */
741 if (nmp
->nm_sofamily
&& (ss
.ss_family
!= nmp
->nm_sofamily
)) {
742 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
743 nss
->nss_addrcnt
-= 1;
748 /* Create the socket. */
749 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nss
->nss_sotype
,
750 nss
->nss_port
, nss
->nss_protocol
, nss
->nss_version
,
751 ((nss
->nss_protocol
== NFS_PROG
) && NMFLAG(nmp
, RESVPORT
)), &nso
);
755 nso
->nso_location
= nss
->nss_nextloc
;
757 error
= sock_setupcall(nso
->nso_so
, nfs_connect_upcall
, nso
);
759 lck_mtx_lock(&nso
->nso_lock
);
760 nso
->nso_error
= error
;
761 nso
->nso_flags
|= NSO_DEAD
;
762 lck_mtx_unlock(&nso
->nso_lock
);
765 TAILQ_INSERT_TAIL(&nss
->nss_socklist
, nso
, nso_link
);
767 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
768 nss
->nss_addrcnt
-= 1;
770 nss
->nss_last
= now
->tv_sec
;
773 if (nss
->nss_addrcnt
== 0 && nss
->nss_last
< 0)
774 nss
->nss_last
= now
->tv_sec
;
780 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
781 * If successful set the socket options for the socket as require from the mount.
783 * Assumes: nso->nso_lock is held on entry and return.
786 nfs_connect_search_socket_connect(struct nfsmount
*nmp
, struct nfs_socket
*nso
, int verbose
)
790 if ((nso
->nso_sotype
!= SOCK_STREAM
) && NMFLAG(nmp
, NOCONNECT
)) {
791 /* no connection needed, just say it's already connected */
792 NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
793 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
794 nso
->nso_flags
|= NSO_CONNECTED
;
795 nfs_socket_options(nmp
, nso
);
796 return (1); /* Socket is connected and setup */
797 } else if (!(nso
->nso_flags
& NSO_CONNECTING
)) {
798 /* initiate the connection */
799 nso
->nso_flags
|= NSO_CONNECTING
;
800 lck_mtx_unlock(&nso
->nso_lock
);
801 NFS_SOCK_DBG("nfs connect %s connecting socket %p\n",
802 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
803 error
= sock_connect(nso
->nso_so
, nso
->nso_saddr
, MSG_DONTWAIT
);
804 lck_mtx_lock(&nso
->nso_lock
);
805 if (error
&& (error
!= EINPROGRESS
)) {
806 nso
->nso_error
= error
;
807 nso
->nso_flags
|= NSO_DEAD
;
811 if (nso
->nso_flags
& NSO_CONNECTING
) {
812 /* check the connection */
813 if (sock_isconnected(nso
->nso_so
)) {
814 NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
815 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
816 nso
->nso_flags
&= ~NSO_CONNECTING
;
817 nso
->nso_flags
|= NSO_CONNECTED
;
818 nfs_socket_options(nmp
, nso
);
819 return (1); /* Socket is connected and setup */
821 int optlen
= sizeof(error
);
823 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &error
, &optlen
);
824 if (error
) { /* we got an error on the socket */
825 NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
826 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
828 printf("nfs connect socket error %d for %s\n",
829 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
830 nso
->nso_error
= error
;
831 nso
->nso_flags
|= NSO_DEAD
;
837 return (0); /* Waiting to be connected */
841 * nfs_connect_search_ping: Send a null proc on the nso socket.
844 nfs_connect_search_ping(struct nfsmount
*nmp
, struct nfs_socket
*nso
, struct timeval
*now
)
846 /* initiate a NULL RPC request */
847 uint64_t xid
= nso
->nso_pingxid
;
848 mbuf_t m
, mreq
= NULL
;
850 size_t reqlen
, sentlen
;
851 uint32_t vers
= nso
->nso_version
;
855 if (nso
->nso_protocol
== PMAPPROG
)
856 vers
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
857 else if (nso
->nso_protocol
== NFS_PROG
)
858 vers
= PVER2MAJOR(nmp
->nm_max_vers
);
860 lck_mtx_unlock(&nso
->nso_lock
);
861 error
= nfsm_rpchead2(nmp
, nso
->nso_sotype
, nso
->nso_protocol
, vers
, 0, RPCAUTH_SYS
,
862 vfs_context_ucred(vfs_context_kernel()), NULL
, NULL
, &xid
, &mreq
);
863 lck_mtx_lock(&nso
->nso_lock
);
865 nso
->nso_flags
|= NSO_PINGING
;
866 nso
->nso_pingxid
= R_XID32(xid
);
867 nso
->nso_reqtimestamp
= now
->tv_sec
;
868 bzero(&msg
, sizeof(msg
));
869 if ((nso
->nso_sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
)) {
870 msg
.msg_name
= nso
->nso_saddr
;
871 msg
.msg_namelen
= nso
->nso_saddr
->sa_len
;
873 for (reqlen
=0, m
=mreq
; m
; m
= mbuf_next(m
))
874 reqlen
+= mbuf_len(m
);
875 lck_mtx_unlock(&nso
->nso_lock
);
876 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreq
, 0, &sentlen
);
877 NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
878 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
879 lck_mtx_lock(&nso
->nso_lock
);
880 if (!error
&& (sentlen
!= reqlen
))
884 nso
->nso_error
= error
;
885 nso
->nso_flags
|= NSO_DEAD
;
893 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
894 * Set the nfs socket protocol and version if needed.
897 nfs_connect_search_socket_found(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct nfs_socket
*nso
)
899 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
900 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
901 if (!nso
->nso_version
) {
902 /* If the version isn't set, the default must have worked. */
903 if (nso
->nso_protocol
== PMAPPROG
)
904 nso
->nso_version
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
905 if (nso
->nso_protocol
== NFS_PROG
)
906 nso
->nso_version
= PVER2MAJOR(nmp
->nm_max_vers
);
908 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
914 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
915 * the list. Dead socket are then destroyed.
918 nfs_connect_search_socket_reap(struct nfsmount
*nmp __unused
, struct nfs_socket_search
*nss
, struct timeval
*now
)
920 struct nfs_socket
*nso
, *nsonext
;
922 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
923 lck_mtx_lock(&nso
->nso_lock
);
924 if (now
->tv_sec
>= (nso
->nso_timestamp
+ nss
->nss_timeo
)) {
926 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
927 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
928 nso
->nso_error
= ETIMEDOUT
;
929 nso
->nso_flags
|= NSO_DEAD
;
931 if (!(nso
->nso_flags
& NSO_DEAD
)) {
932 lck_mtx_unlock(&nso
->nso_lock
);
935 lck_mtx_unlock(&nso
->nso_lock
);
936 NFS_SOCK_DBG("nfs connect %s reaping socket %p %d\n",
937 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, nso
->nso_error
);
938 nfs_socket_search_update_error(nss
, nso
->nso_error
);
939 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
941 nfs_socket_destroy(nso
);
942 /* If there are more sockets to try, force the starting of another socket */
943 if (nss
->nss_addrcnt
> 0)
949 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
/*
 * NOTE(review): fragmented extraction — the function's return type and the
 * early-return statements for the unmount/signal checks (original lines
 * ~962/964) are not visible here; only comments have been added below.
 */
952 nfs_connect_search_check(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
956 /* log a warning if connect is taking a while */
/* warn only once: >= 8s elapsed, NSS_VERBOSE set and NSS_WARNED not yet set */
957 if (((now
->tv_sec
- nss
->nss_timestamp
) >= 8) && ((nss
->nss_flags
& (NSS_VERBOSE
|NSS_WARNED
)) == NSS_VERBOSE
)) {
958 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
/* remember that we warned so we don't repeat it */
959 nss
->nss_flags
|= NSS_WARNED
;
/* bail out if the mount is being unmounted */
961 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
/* bail out if a signal interrupted the mount */
963 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 0)))
966 /* If we were successful at sending a ping, wait up to a second for a reply */
967 if (nss
->nss_last
>= 0)
968 tsleep(nss
, PSOCK
, "nfs_connect_search_wait", hz
);
/*
 * nfs_connect_search_loop
 *
 * Drive the socket search: create new candidate sockets as needed,
 * push each active socket along (connect, ping, check verification),
 * reap dead/timed-out sockets, and loop until a socket is found or
 * there is nothing left to try.
 *
 * NOTE(review): sampled excerpt -- return type, the declaration of
 * 'now'/'error', loop branching (continue/goto) and the final return
 * are elided from view.
 */
975 * Continue the socket search until we have something to report.
978 nfs_connect_search_loop(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
)
980 struct nfs_socket
*nso
;
983 int verbose
= (nss
->nss_flags
& NSS_VERBOSE
);
987 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, now
.tv_sec
);
989 /* add a new socket to the socket list if needed and available */
990 error
= nfs_connect_search_new_socket(nmp
, nss
, &now
);
992 NFS_SOCK_DBG("nfs connect returned %d\n", error
);
996 /* check each active socket on the list and try to push it along */
997 TAILQ_FOREACH(nso
, &nss
->nss_socklist
, nso_link
) {
998 lck_mtx_lock(&nso
->nso_lock
);
1000 /* If not connected connect it */
1001 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
1002 if (!nfs_connect_search_socket_connect(nmp
, nso
, verbose
)) {
1003 lck_mtx_unlock(&nso
->nso_lock
);
1008 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
1009 if (!(nso
->nso_flags
& (NSO_PINGING
|NSO_VERIFIED
)) ||
1010 ((nso
->nso_sotype
== SOCK_DGRAM
) && (now
.tv_sec
>= nso
->nso_reqtimestamp
+2))) {
1011 if (!nfs_connect_search_ping(nmp
, nso
, &now
)) {
1012 lck_mtx_unlock(&nso
->nso_lock
);
1017 /* Has the socket been verified by the up call routine? */
1018 if (nso
->nso_flags
& NSO_VERIFIED
) {
1019 /* WOOHOO!! This socket looks good! */
1020 nfs_connect_search_socket_found(nmp
, nss
, nso
);
1021 lck_mtx_unlock(&nso
->nso_lock
);
1024 lck_mtx_unlock(&nso
->nso_lock
);
1027 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1028 nfs_connect_search_socket_reap(nmp
, nss
, &now
);
1031 * Keep looping if we haven't found a socket yet and we have more
1032 * sockets to (continue to) try.
1035 if (!nss
->nss_sock
&& (!TAILQ_EMPTY(&nss
->nss_socklist
) || nss
->nss_addrcnt
)) {
1036 error
= nfs_connect_search_check(nmp
, nss
, &now
);
1041 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
/*
 * nfs_connect
 *
 * Establish the socket for an NFS mount: validate that the mount's
 * location list has at least one address, run the async socket search
 * (possibly via the portmapper), perform the MOUNT RPC for v2/v3
 * first-time mounts, negotiate the security flavor, install the real
 * receive upcall, and finally move the chosen socket into the mount
 * structure and mark it ready.
 *
 * NOTE(review): this excerpt is heavily sampled -- locals such as
 * 'loc', 'serv', 'addrtotal', 'port', 'path', 'found', 'upcall' and
 * many braces/else/goto/return lines are elided from view.
 */
1046 * Initialize a new NFS connection.
1048 * Search for a location to connect a socket to and initialize the connection.
1050 * An NFS mount may have multiple locations/servers/addresses available.
1051 * We attempt to connect to each one asynchronously and will start
1052 * several sockets in parallel if other locations are slow to answer.
1053 * We'll use the first NFS socket we can successfully set up.
1055 * The search may involve contacting the portmapper service first.
1057 * A mount's initial connection may require negotiating some parameters such
1058 * as socket type and NFS version.
1062 nfs_connect(struct nfsmount
*nmp
, int verbose
, int timeo
)
1064 struct nfs_socket_search nss
;
1065 struct nfs_socket
*nso
, *nsonfs
;
1066 struct sockaddr_storage ss
;
1067 struct sockaddr
*saddr
, *oldsaddr
;
1069 struct timeval now
, start
;
1070 int error
, savederror
, nfsvers
;
1072 uint8_t sotype
= nmp
->nm_sotype
? nmp
->nm_sotype
: SOCK_STREAM
;
1073 fhandle_t
*fh
= NULL
;
1078 /* paranoia... check that we have at least one address in the locations */
1080 for (loc
=0; loc
< nmp
->nm_locations
.nl_numlocs
; loc
++) {
1081 for (serv
=0; serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
; serv
++) {
1082 addrtotal
+= nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
;
1083 if (nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
== 0)
1084 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1085 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1086 nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_name
);
1090 if (addrtotal
== 0) {
1091 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1092 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1095 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1096 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, addrtotal
);
1098 lck_mtx_lock(&nmp
->nm_lock
);
1099 nmp
->nm_sockflags
|= NMSOCK_CONNECTING
;
1101 lck_mtx_unlock(&nmp
->nm_lock
);
1102 microuptime(&start
);
1103 savederror
= error
= 0;
1106 /* initialize socket search state */
1107 bzero(&nss
, sizeof(nss
));
1108 nss
.nss_addrcnt
= addrtotal
;
1109 nss
.nss_error
= savederror
;
1110 TAILQ_INIT(&nss
.nss_socklist
);
1111 nss
.nss_sotype
= sotype
;
1112 nss
.nss_startloc
= nmp
->nm_locations
.nl_current
;
1113 nss
.nss_timestamp
= start
.tv_sec
;
1114 nss
.nss_timeo
= timeo
;
1116 nss
.nss_flags
|= NSS_VERBOSE
;
1118 /* First time connecting, we may need to negotiate some things */
1119 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1120 if (!nmp
->nm_vers
) {
1121 /* No NFS version specified... */
1122 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1123 if (PVER2MAJOR(nmp
->nm_max_vers
) >= NFS_VER4
&& tryv4
) {
1124 nss
.nss_port
= NFS_PORT
;
1125 nss
.nss_protocol
= NFS_PROG
;
1126 nss
.nss_version
= 4;
1127 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1129 /* ...connect to portmapper first if we (may) need any ports. */
1130 nss
.nss_port
= PMAPPORT
;
1131 nss
.nss_protocol
= PMAPPROG
;
1132 nss
.nss_version
= 0;
1135 /* ...connect to NFS port first. */
1136 nss
.nss_port
= nmp
->nm_nfsport
;
1137 nss
.nss_protocol
= NFS_PROG
;
1138 nss
.nss_version
= 0;
1140 } else if (nmp
->nm_vers
>= NFS_VER4
) {
1142 /* For NFSv4, we use the given (or default) port. */
1143 nss
.nss_port
= nmp
->nm_nfsport
? nmp
->nm_nfsport
: NFS_PORT
;
1144 nss
.nss_protocol
= NFS_PROG
;
1145 nss
.nss_version
= 4;
1147 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1148 * if no port is specified on the mount;
1149 * Note nm_vers is set so we will only try NFS_VER4.
1151 if (!nmp
->nm_nfsport
)
1152 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1154 nss
.nss_port
= PMAPPORT
;
1155 nss
.nss_protocol
= PMAPPROG
;
1156 nss
.nss_version
= 0;
1159 /* For NFSv3/v2... */
1160 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1161 /* ...connect to portmapper first if we need any ports. */
1162 nss
.nss_port
= PMAPPORT
;
1163 nss
.nss_protocol
= PMAPPROG
;
1164 nss
.nss_version
= 0;
1166 /* ...connect to NFS port first. */
1167 nss
.nss_port
= nmp
->nm_nfsport
;
1168 nss
.nss_protocol
= NFS_PROG
;
1169 nss
.nss_version
= nmp
->nm_vers
;
1172 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1173 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1174 nss
.nss_protocol
, nss
.nss_version
);
1176 /* we've connected before, just connect to NFS port */
1177 if (!nmp
->nm_nfsport
) {
1178 /* need to ask portmapper which port that would be */
1179 nss
.nss_port
= PMAPPORT
;
1180 nss
.nss_protocol
= PMAPPROG
;
1181 nss
.nss_version
= 0;
1183 nss
.nss_port
= nmp
->nm_nfsport
;
1184 nss
.nss_protocol
= NFS_PROG
;
1185 nss
.nss_version
= nmp
->nm_vers
;
1187 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1188 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1189 nss
.nss_protocol
, nss
.nss_version
);
1192 /* Set next location to first valid location. */
1193 /* If start location is invalid, find next location. */
1194 nss
.nss_nextloc
= nss
.nss_startloc
;
1195 if ((nss
.nss_nextloc
.nli_serv
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servcount
) ||
1196 (nss
.nss_nextloc
.nli_addr
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servers
[nss
.nss_nextloc
.nli_serv
]->ns_addrcount
)) {
1197 nfs_location_next(&nmp
->nm_locations
, &nss
.nss_nextloc
);
1198 if (!nfs_location_index_cmp(&nss
.nss_nextloc
, &nss
.nss_startloc
)) {
1199 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1200 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
/* run the search; on failure possibly retry via portmap or fall back TCP->UDP */
1208 error
= nfs_connect_search_loop(nmp
, &nss
);
1209 if (error
|| !nss
.nss_sock
) {
1211 nfs_socket_search_cleanup(&nss
);
1212 if (nss
.nss_flags
& NSS_FALLBACK2PMAP
) {
1214 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1215 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1219 if (!error
&& (nss
.nss_sotype
== SOCK_STREAM
) && !nmp
->nm_sotype
&& (nmp
->nm_vers
< NFS_VER4
)) {
1221 sotype
= SOCK_DGRAM
;
1222 savederror
= nss
.nss_error
;
1223 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1224 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1228 error
= nss
.nss_error
? nss
.nss_error
: ETIMEDOUT
;
1229 lck_mtx_lock(&nmp
->nm_lock
);
1230 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1232 lck_mtx_unlock(&nmp
->nm_lock
);
1233 if (nss
.nss_flags
& NSS_WARNED
)
1234 log(LOG_INFO
, "nfs_connect: socket connect aborted for %s\n",
1235 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1239 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1240 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1241 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1245 /* try to use nss_sock */
1247 nss
.nss_sock
= NULL
;
1249 /* We may be speaking to portmap first... to determine port(s). */
1250 if (nso
->nso_saddr
->sa_family
== AF_INET
)
1251 port
= ntohs(((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
);
1253 port
= ntohs(((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
);
1254 if (port
== PMAPPORT
) {
1255 /* Use this portmapper port to get the port #s we need. */
1256 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1257 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1259 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1260 sock_setupcall(nso
->nso_so
, NULL
, NULL
);
1262 /* Set up socket address and port for NFS socket. */
1263 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1265 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1266 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: PVER2MAJOR(nmp
->nm_max_vers
);
1267 if (!(port
= nmp
->nm_nfsport
)) {
1268 if (ss
.ss_family
== AF_INET
)
1269 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
1270 else if (ss
.ss_family
== AF_INET6
)
1271 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
1272 for (; nfsvers
>= (int)PVER2MAJOR(nmp
->nm_min_vers
); nfsvers
--) {
1273 if (nmp
->nm_vers
&& nmp
->nm_vers
!= nfsvers
)
1274 continue; /* Wrong version */
1275 if (nfsvers
== NFS_VER4
&& nso
->nso_sotype
== SOCK_DGRAM
)
1276 continue; /* NFSv4 does not do UDP */
1277 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1278 nso
->nso_so
, NFS_PROG
, nfsvers
,
1279 (nso
->nso_sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
, timeo
);
1281 if (ss
.ss_family
== AF_INET
)
1282 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1283 else if (ss
.ss_family
== AF_INET6
)
1284 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1286 error
= EPROGUNAVAIL
;
1287 if (port
== NFS_PORT
&& nfsvers
== NFS_VER4
&& tryv4
== 0)
1288 continue; /* We already tried this */
1293 if (nfsvers
< (int)PVER2MAJOR(nmp
->nm_min_vers
) && error
== 0)
1294 error
= EPROGUNAVAIL
;
1296 nfs_socket_search_update_error(&nss
, error
);
1297 nfs_socket_destroy(nso
);
1301 /* Create NFS protocol socket and add it to the list of sockets. */
1302 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1303 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nso
->nso_sotype
, port
,
1304 NFS_PROG
, nfsvers
, NMFLAG(nmp
, RESVPORT
), &nsonfs
);
1306 nfs_socket_search_update_error(&nss
, error
);
1307 nfs_socket_destroy(nso
);
1310 nsonfs
->nso_location
= nso
->nso_location
;
1311 nsonfs
->nso_wake
= &nss
;
1312 error
= sock_setupcall(nsonfs
->nso_so
, nfs_connect_upcall
, nsonfs
);
1314 nfs_socket_search_update_error(&nss
, error
);
1315 nfs_socket_destroy(nsonfs
);
1316 nfs_socket_destroy(nso
);
1319 TAILQ_INSERT_TAIL(&nss
.nss_socklist
, nsonfs
, nso_link
);
1321 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1322 /* Set up socket address and port for MOUNT socket. */
1324 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1325 port
= nmp
->nm_mountport
;
1326 if (ss
.ss_family
== AF_INET
)
1327 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1328 else if (ss
.ss_family
== AF_INET6
)
1329 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1331 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1332 /* If NFS version is unknown, optimistically choose for NFSv3. */
1333 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1334 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1335 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1336 nso
->nso_so
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1339 if (ss
.ss_family
== AF_INET
)
1340 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1341 else if (ss
.ss_family
== AF_INET6
)
1342 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1344 error
= EPROGUNAVAIL
;
1346 /* create sockaddr for MOUNT */
1348 MALLOC(nsonfs
->nso_saddr2
, struct sockaddr
*, ss
.ss_len
, M_SONAME
, M_WAITOK
|M_ZERO
);
1349 if (!error
&& !nsonfs
->nso_saddr2
)
1352 bcopy(&ss
, nsonfs
->nso_saddr2
, ss
.ss_len
);
1354 lck_mtx_lock(&nsonfs
->nso_lock
);
1355 nsonfs
->nso_error
= error
;
1356 nsonfs
->nso_flags
|= NSO_DEAD
;
1357 lck_mtx_unlock(&nsonfs
->nso_lock
);
1360 nfs_socket_destroy(nso
);
1364 /* nso is an NFS socket */
1365 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1367 /* If NFS version wasn't specified, it was determined during the connect. */
1368 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: (int)nso
->nso_version
;
1370 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1371 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1373 saddr
= nso
->nso_saddr2
;
1375 /* Need sockaddr for MOUNT port */
1376 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1377 port
= nmp
->nm_mountport
;
1378 if (ss
.ss_family
== AF_INET
)
1379 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1380 else if (ss
.ss_family
== AF_INET6
)
1381 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1383 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1384 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1385 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1386 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1387 NULL
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1388 if (ss
.ss_family
== AF_INET
)
1389 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1390 else if (ss
.ss_family
== AF_INET6
)
1391 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1395 saddr
= (struct sockaddr
*)&ss
;
1397 error
= EPROGUNAVAIL
;
1401 MALLOC(fh
, fhandle_t
*, sizeof(fhandle_t
), M_TEMP
, M_WAITOK
|M_ZERO
);
1403 MALLOC_ZONE(path
, char *, MAXPATHLEN
, M_NAMEI
, M_WAITOK
);
1404 if (!saddr
|| !fh
|| !path
) {
1410 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1413 nfs_socket_search_update_error(&nss
, error
);
1414 nfs_socket_destroy(nso
);
1417 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
, path
, MAXPATHLEN
, 1);
1418 error
= nfs3_mount_rpc(nmp
, saddr
, nso
->nso_sotype
, nfsvers
,
1419 path
, vfs_context_current(), timeo
, fh
, &nmp
->nm_servsec
);
1420 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1421 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1423 /* Make sure we can agree on a security flavor. */
1424 int o
, s
; /* indices into mount option and server security flavor lists */
1427 if ((nfsvers
== NFS_VER3
) && !nmp
->nm_servsec
.count
) {
1428 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1429 nmp
->nm_servsec
.count
= 1;
1430 nmp
->nm_servsec
.flavors
[0] = RPCAUTH_SYS
;
1432 if (nmp
->nm_sec
.count
) {
1433 /* Choose the first flavor in our list that the server supports. */
1434 if (!nmp
->nm_servsec
.count
) {
1435 /* we don't know what the server supports, just use our first choice */
1436 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[0];
1439 for (o
=0; !found
&& (o
< nmp
->nm_sec
.count
); o
++)
1440 for (s
=0; !found
&& (s
< nmp
->nm_servsec
.count
); s
++)
1441 if (nmp
->nm_sec
.flavors
[o
] == nmp
->nm_servsec
.flavors
[s
]) {
1442 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[o
];
1446 /* Choose the first one we support from the server's list. */
1447 if (!nmp
->nm_servsec
.count
) {
1448 nmp
->nm_auth
= RPCAUTH_SYS
;
1451 for (s
=0; s
< nmp
->nm_servsec
.count
; s
++)
1452 switch (nmp
->nm_servsec
.flavors
[s
]) {
1454 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1455 if (found
&& (nmp
->nm_auth
== RPCAUTH_NONE
))
1462 nmp
->nm_auth
= nmp
->nm_servsec
.flavors
[s
];
1468 error
= !found
? EAUTH
: 0;
1470 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1473 nfs_socket_search_update_error(&nss
, error
);
1476 nfs_socket_destroy(nso
);
1480 FREE(nmp
->nm_fh
, M_TEMP
);
1483 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_CALLUMNT
);
1486 /* put the real upcall in place */
1487 upcall
= (nso
->nso_sotype
== SOCK_STREAM
) ? nfs_tcp_rcv
: nfs_udp_rcv
;
1488 error
= sock_setupcall(nso
->nso_so
, upcall
, nmp
);
1490 nfs_socket_search_update_error(&nss
, error
);
1491 nfs_socket_destroy(nso
);
1495 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1496 /* set mntfromname to this location */
1497 if (!NM_OMATTR_GIVEN(nmp
, MNTFROM
))
1498 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
,
1499 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1500 sizeof(vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
), 0);
1501 /* some negotiated values need to remain unchanged for the life of the mount */
1502 if (!nmp
->nm_sotype
)
1503 nmp
->nm_sotype
= nso
->nso_sotype
;
1504 if (!nmp
->nm_vers
) {
1505 nmp
->nm_vers
= nfsvers
;
1506 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1507 if ((nfsvers
>= NFS_VER4
) && !NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1508 if (nso
->nso_saddr
->sa_family
== AF_INET
)
1509 port
= ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
1510 else if (nso
->nso_saddr
->sa_family
== AF_INET6
)
1511 port
= ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
1514 if (port
== NFS_PORT
)
1515 nmp
->nm_nfsport
= NFS_PORT
;
1518 /* do some version-specific pre-mount set up */
1519 if (nmp
->nm_vers
>= NFS_VER4
) {
1521 nmp
->nm_mounttime
= ((uint64_t)now
.tv_sec
<< 32) | now
.tv_usec
;
1522 if (!NMFLAG(nmp
, NOCALLBACK
))
1523 nfs4_mount_callback_setup(nmp
);
1527 /* Initialize NFS socket state variables */
1528 lck_mtx_lock(&nmp
->nm_lock
);
1529 nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] =
1530 nmp
->nm_srtt
[3] = (NFS_TIMEO
<< 3);
1531 nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
1532 nmp
->nm_sdrtt
[3] = 0;
1533 if (nso
->nso_sotype
== SOCK_DGRAM
) {
1534 nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
1536 } else if (nso
->nso_sotype
== SOCK_STREAM
) {
1537 nmp
->nm_timeouts
= 0;
1539 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1540 nmp
->nm_sockflags
|= NMSOCK_SETUP
;
1541 /* move the socket to the mount structure */
1543 oldsaddr
= nmp
->nm_saddr
;
1544 nmp
->nm_saddr
= nso
->nso_saddr
;
1545 lck_mtx_unlock(&nmp
->nm_lock
);
1546 error
= nfs_connect_setup(nmp
);
1547 lck_mtx_lock(&nmp
->nm_lock
);
1548 nmp
->nm_sockflags
&= ~NMSOCK_SETUP
;
1550 nmp
->nm_sockflags
|= NMSOCK_READY
;
1551 wakeup(&nmp
->nm_sockflags
);
1554 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1555 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1556 nfs_socket_search_update_error(&nss
, error
);
1557 nmp
->nm_saddr
= oldsaddr
;
1558 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1559 /* undo settings made prior to setup */
1560 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_SOCKET_TYPE
))
1562 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_VERSION
)) {
1563 if (nmp
->nm_vers
>= NFS_VER4
) {
1564 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
))
1565 nmp
->nm_nfsport
= 0;
1567 nfs4_mount_callback_shutdown(nmp
);
1568 if (IS_VALID_CRED(nmp
->nm_mcred
))
1569 kauth_cred_unref(&nmp
->nm_mcred
);
1570 bzero(&nmp
->nm_un
, sizeof(nmp
->nm_un
));
1575 lck_mtx_unlock(&nmp
->nm_lock
);
1577 nfs_socket_destroy(nso
);
1581 /* update current location */
1582 if ((nmp
->nm_locations
.nl_current
.nli_flags
& NLI_VALID
) &&
1583 (nmp
->nm_locations
.nl_current
.nli_serv
!= nso
->nso_location
.nli_serv
)) {
1584 /* server has changed, we should initiate failover/recovery */
1587 nmp
->nm_locations
.nl_current
= nso
->nso_location
;
1588 nmp
->nm_locations
.nl_current
.nli_flags
|= NLI_VALID
;
1590 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1591 /* We have now successfully connected... make a note of it. */
1592 nmp
->nm_sockflags
|= NMSOCK_HASCONNECTED
;
1595 lck_mtx_unlock(&nmp
->nm_lock
);
1597 FREE(oldsaddr
, M_SONAME
);
1599 if (nss
.nss_flags
& NSS_WARNED
)
1600 log(LOG_INFO
, "nfs_connect: socket connect completed for %s\n",
1601 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1604 nfs_socket_search_cleanup(&nss
);
1608 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1609 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
/*
 * nfs_connect_setup
 *
 * Post-connect setup: for NFSv4, renew existing client state if we
 * already hold a client ID (triggering recovery if the server says the
 * state is gone), otherwise establish a new client ID.
 *
 * NOTE(review): sampled excerpt -- return type, 'error' declaration,
 * the non-v4 path, and the final return are elided from view.
 */
1614 /* setup & confirm socket connection is functional */
1616 nfs_connect_setup(struct nfsmount
*nmp
)
1620 if (nmp
->nm_vers
>= NFS_VER4
) {
1621 if (nmp
->nm_state
& NFSSTA_CLIENTID
) {
1622 /* first, try to renew our current state */
1623 error
= nfs4_renew(nmp
, R_SETUP
);
/* these errors mean our state is invalid on the server -> recover */
1624 if ((error
== NFSERR_ADMIN_REVOKED
) ||
1625 (error
== NFSERR_CB_PATH_DOWN
) ||
1626 (error
== NFSERR_EXPIRED
) ||
1627 (error
== NFSERR_LEASE_MOVED
) ||
1628 (error
== NFSERR_STALE_CLIENTID
)) {
1629 lck_mtx_lock(&nmp
->nm_lock
);
1630 nfs_need_recover(nmp
, error
);
1631 lck_mtx_unlock(&nmp
->nm_lock
);
1634 error
= nfs4_setclientid(nmp
);
/*
 * nfs_reconnect
 *
 * Tear down the old socket and keep retrying nfs_connect() until it
 * succeeds, honoring signals, forced-unmount state, and the dead
 * timeout; then mark all outstanding requests for resend.
 *
 * NOTE(review): sampled excerpt -- declarations of 'lastmsg', 'timeo',
 * 'rq' and several continue/return lines are elided from view.
 */
1640 * NFS socket reconnect routine:
1641 * Called when a connection is broken.
1642 * - disconnect the old socket
1643 * - nfs_connect() again
1644 * - set R_MUSTRESEND for all outstanding requests on mount point
1645 * If this fails the mount point is DEAD!
1648 nfs_reconnect(struct nfsmount
*nmp
)
1652 thread_t thd
= current_thread();
1653 int error
, wentdown
= 0, verbose
= 1;
1658 lastmsg
= now
.tv_sec
- (nmp
->nm_tprintf_delay
- nmp
->nm_tprintf_initial_delay
);
1660 nfs_disconnect(nmp
);
/* squishy (likely-to-die) mounts get a shorter connect timeout */
1663 lck_mtx_lock(&nmp
->nm_lock
);
1664 timeo
= nfs_is_squishy(nmp
) ? 8 : 30;
1665 lck_mtx_unlock(&nmp
->nm_lock
);
1667 while ((error
= nfs_connect(nmp
, verbose
, timeo
))) {
1669 nfs_disconnect(nmp
);
1670 if ((error
== EINTR
) || (error
== ERESTART
))
/* rate-limited "can not connect" tprintf-style notification */
1675 if ((lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
) {
1676 lastmsg
= now
.tv_sec
;
1677 nfs_down(nmp
, thd
, error
, NFSSTA_TIMEO
, "can not connect", 0);
1680 lck_mtx_lock(&nmp
->nm_lock
);
1681 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1682 /* we're not yet completely mounted and */
1683 /* we can't reconnect, so we fail */
1684 lck_mtx_unlock(&nmp
->nm_lock
);
1685 NFS_SOCK_DBG("Not mounted returning %d\n", error
);
1689 if (nfs_mount_check_dead_timeout(nmp
)) {
1690 nfs_mount_make_zombie(nmp
);
1691 lck_mtx_unlock(&nmp
->nm_lock
);
1695 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1696 lck_mtx_unlock(&nmp
->nm_lock
);
1699 lck_mtx_unlock(&nmp
->nm_lock
);
1700 tsleep(nfs_reconnect
, PSOCK
, "nfs_reconnect_delay", 2*hz
);
1701 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1706 nfs_up(nmp
, thd
, NFSSTA_TIMEO
, "connected");
1709 * Loop through outstanding request list and mark all requests
1710 * as needing a resend. (Though nfs_need_reconnect() probably
1711 * marked them all already.)
1713 lck_mtx_lock(nfs_request_mutex
);
1714 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1715 if (rq
->r_nmp
== nmp
) {
1716 lck_mtx_lock(&rq
->r_mtx
);
1717 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1718 rq
->r_flags
|= R_MUSTRESEND
;
/* idle async requests get queued for the iod to resend */
1721 if ((rq
->r_flags
& (R_IOD
|R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
1722 nfs_asyncio_resend(rq
);
1724 lck_mtx_unlock(&rq
->r_mtx
);
1727 lck_mtx_unlock(nfs_request_mutex
);
/*
 * nfs_disconnect
 *
 * Detach and destroy the mount's current socket: wait for any
 * in-progress send/poke to finish, mark the mount disconnecting,
 * unlink the sockaddr shared with the mount, and destroy the socket
 * outside the mount lock.
 *
 * NOTE(review): sampled excerpt -- the fetch of 'nso' from the mount
 * and some braces/else lines are elided from view.
 */
1732 * NFS disconnect. Clean up and unlink.
1735 nfs_disconnect(struct nfsmount
*nmp
)
1737 struct nfs_socket
*nso
;
1739 lck_mtx_lock(&nmp
->nm_lock
);
1742 struct timespec ts
= { 1, 0 };
1743 if (nmp
->nm_state
& NFSSTA_SENDING
) { /* wait for sending to complete */
1744 nmp
->nm_state
|= NFSSTA_WANTSND
;
1745 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, PZERO
-1, "nfswaitsending", &ts
);
1748 if (nmp
->nm_sockflags
& NMSOCK_POKE
) { /* wait for poking to complete */
1749 msleep(&nmp
->nm_sockflags
, &nmp
->nm_lock
, PZERO
-1, "nfswaitpoke", &ts
);
1752 nmp
->nm_sockflags
|= NMSOCK_DISCONNECTING
;
1753 nmp
->nm_sockflags
&= ~NMSOCK_READY
;
/* the socket's sockaddr may be the mount's nm_saddr; don't free it twice */
1756 if (nso
->nso_saddr
== nmp
->nm_saddr
)
1757 nso
->nso_saddr
= NULL
;
1758 lck_mtx_unlock(&nmp
->nm_lock
);
1759 nfs_socket_destroy(nso
);
1760 lck_mtx_lock(&nmp
->nm_lock
);
1761 nmp
->nm_sockflags
&= ~NMSOCK_DISCONNECTING
;
1762 lck_mtx_unlock(&nmp
->nm_lock
);
1764 lck_mtx_unlock(&nmp
->nm_lock
);
/*
 * nfs_need_reconnect
 *
 * Flag the mount's socket as no longer ready/being-set-up and mark
 * every outstanding request on this mount for resend, queueing idle
 * async requests to the iod threads.
 *
 * NOTE(review): sampled excerpt -- return type and the declaration of
 * 'rq' are elided from view.
 */
1769 * mark an NFS mount as needing a reconnect/resends.
1772 nfs_need_reconnect(struct nfsmount
*nmp
)
1776 lck_mtx_lock(&nmp
->nm_lock
);
1777 nmp
->nm_sockflags
&= ~(NMSOCK_READY
|NMSOCK_SETUP
);
1778 lck_mtx_unlock(&nmp
->nm_lock
);
1781 * Loop through outstanding request list and
1782 * mark all requests as needing a resend.
1784 lck_mtx_lock(nfs_request_mutex
);
1785 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1786 if (rq
->r_nmp
== nmp
) {
1787 lck_mtx_lock(&rq
->r_mtx
);
1788 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1789 rq
->r_flags
|= R_MUSTRESEND
;
/* idle async requests get queued for the iod to resend */
1792 if ((rq
->r_flags
& (R_IOD
|R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
1793 nfs_asyncio_resend(rq
);
1795 lck_mtx_unlock(&rq
->r_mtx
);
1798 lck_mtx_unlock(nfs_request_mutex
);
/*
 * nfs_mount_sock_thread
 *
 * Per-mount worker thread. Loops while there is socket work to do:
 * reconnects a broken socket, runs NFSv4 state recovery and delegation
 * returns, resends queued requests, scans monitored nodes, checks the
 * dead timeout, and finally (on unmount) issues the NFSv3 UMNT RPC
 * before clearing nm_sockthd and terminating itself.
 *
 * NOTE(review): sampled excerpt -- declarations of 'now', 'np', 'req',
 * 'lvl', many braces/else/continue/goto lines, and the recovery call
 * itself are elided from view.
 */
1803 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
1806 nfs_mount_sock_thread(void *arg
, __unused wait_result_t wr
)
1808 struct nfsmount
*nmp
= arg
;
1809 struct timespec ts
= { 30, 0 };
1810 thread_t thd
= current_thread();
1813 int error
, dofinish
;
1815 int do_reconnect_sleep
= 0;
1817 lck_mtx_lock(&nmp
->nm_lock
);
/* keep running while any of these kinds of work remain */
1818 while (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
1819 !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
1820 !LIST_EMPTY(&nmp
->nm_monlist
) ||
1821 nmp
->nm_deadto_start
||
1822 (nmp
->nm_state
& NFSSTA_RECOVER
) ||
1823 ((nmp
->nm_vers
>= NFS_VER4
) && !TAILQ_EMPTY(&nmp
->nm_dreturnq
)))
1825 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
1827 /* do reconnect, if necessary */
1828 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))) {
1829 if (nmp
->nm_reconnect_start
<= 0) {
1831 nmp
->nm_reconnect_start
= now
.tv_sec
;
1833 lck_mtx_unlock(&nmp
->nm_lock
);
1834 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1836 * XXX We don't want to call reconnect again right away if returned errors
1837 * before that may not have blocked. This has caused spamming null procs
1838 * from machines in the pass.
1840 if (do_reconnect_sleep
)
1841 tsleep(nfs_mount_sock_thread
, PSOCK
, "nfs_reconnect_sock_thread_delay", hz
);
1842 error
= nfs_reconnect(nmp
);
1845 if (error
== EIO
|| error
== EINTR
) {
1846 lvl
= (do_reconnect_sleep
++ % 600) ? 7 : 0;
1848 nfs_printf(NFS_FAC_SOCK
, lvl
, "nfs reconnect %s: returned %d\n",
1849 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1851 nmp
->nm_reconnect_start
= 0;
1852 do_reconnect_sleep
= 0;
1854 lck_mtx_lock(&nmp
->nm_lock
);
1856 if ((nmp
->nm_sockflags
& NMSOCK_READY
) &&
1857 (nmp
->nm_state
& NFSSTA_RECOVER
) &&
1858 !(nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
1859 !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))) {
1860 /* perform state recovery */
1861 lck_mtx_unlock(&nmp
->nm_lock
);
1863 lck_mtx_lock(&nmp
->nm_lock
);
1865 /* handle NFSv4 delegation returns */
1866 while ((nmp
->nm_vers
>= NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) &&
1867 (nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
) &&
1868 ((np
= TAILQ_FIRST(&nmp
->nm_dreturnq
)))) {
1869 lck_mtx_unlock(&nmp
->nm_lock
);
1870 nfs4_delegation_return(np
, R_RECOVER
, thd
, nmp
->nm_mcred
);
1871 lck_mtx_lock(&nmp
->nm_lock
);
1873 /* do resends, if necessary/possible */
1874 while ((((nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
)) ||
1875 (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))) &&
1876 ((req
= TAILQ_FIRST(&nmp
->nm_resendq
)))) {
1877 if (req
->r_resendtime
)
/* skip requests whose resend time hasn't arrived yet */
1879 while (req
&& !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) && req
->r_resendtime
&& (now
.tv_sec
< req
->r_resendtime
))
1880 req
= TAILQ_NEXT(req
, r_rchain
);
1883 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
1884 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
1885 lck_mtx_unlock(&nmp
->nm_lock
);
1886 lck_mtx_lock(&req
->r_mtx
);
1887 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
1888 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
1889 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1890 req
->r_flags
&= ~R_RESENDQ
;
1892 lck_mtx_unlock(&req
->r_mtx
);
1894 nfs_asyncio_finish(req
);
1895 nfs_request_rele(req
);
1896 lck_mtx_lock(&nmp
->nm_lock
);
1899 if ((req
->r_flags
& R_RESTART
) || nfs_request_using_gss(req
)) {
1900 req
->r_flags
&= ~R_RESTART
;
1901 req
->r_resendtime
= 0;
1902 lck_mtx_unlock(&req
->r_mtx
);
1903 /* async RPCs on GSS mounts need to be rebuilt and resent. */
1904 nfs_reqdequeue(req
);
1905 if (nfs_request_using_gss(req
)) {
1906 nfs_gss_clnt_rpcdone(req
);
1907 error
= nfs_gss_clnt_args_restore(req
);
1908 if (error
== ENEEDAUTH
)
1911 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
1912 nfs_request_using_gss(req
) ? " gss" : "", req
->r_procnum
, req
->r_xid
,
1913 req
->r_flags
, req
->r_rtt
);
1914 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
1916 error
= nfs_request_add_header(req
);
1918 error
= nfs_request_send(req
, 0);
1919 lck_mtx_lock(&req
->r_mtx
);
1920 if (req
->r_flags
& R_RESENDQ
)
1921 req
->r_flags
&= ~R_RESENDQ
;
1923 req
->r_error
= error
;
1925 dofinish
= error
&& req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1926 lck_mtx_unlock(&req
->r_mtx
);
1928 nfs_asyncio_finish(req
);
1929 nfs_request_rele(req
);
1930 lck_mtx_lock(&nmp
->nm_lock
);
1934 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
1935 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
1936 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
1938 req
->r_flags
|= R_SENDING
;
1939 lck_mtx_unlock(&req
->r_mtx
);
1940 error
= nfs_send(req
, 0);
1941 lck_mtx_lock(&req
->r_mtx
);
1943 if (req
->r_flags
& R_RESENDQ
)
1944 req
->r_flags
&= ~R_RESENDQ
;
1946 lck_mtx_unlock(&req
->r_mtx
);
1947 nfs_request_rele(req
);
1948 lck_mtx_lock(&nmp
->nm_lock
);
1952 req
->r_error
= error
;
1953 if (req
->r_flags
& R_RESENDQ
)
1954 req
->r_flags
&= ~R_RESENDQ
;
1956 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1957 lck_mtx_unlock(&req
->r_mtx
);
1959 nfs_asyncio_finish(req
);
1960 nfs_request_rele(req
);
1961 lck_mtx_lock(&nmp
->nm_lock
);
1963 if (nfs_mount_check_dead_timeout(nmp
)) {
1964 nfs_mount_make_zombie(nmp
);
1968 if (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))
1970 /* check monitored nodes, if necessary/possible */
1971 if (!LIST_EMPTY(&nmp
->nm_monlist
)) {
1972 nmp
->nm_state
|= NFSSTA_MONITOR_SCAN
;
1973 LIST_FOREACH(np
, &nmp
->nm_monlist
, n_monlink
) {
1974 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
1975 (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
|NFSSTA_FORCE
|NFSSTA_DEAD
)))
1977 np
->n_mflag
|= NMMONSCANINPROG
;
1978 lck_mtx_unlock(&nmp
->nm_lock
);
1979 error
= nfs_getattr(np
, NULL
, vfs_context_kernel(), (NGA_UNCACHED
|NGA_MONITOR
));
1980 if (!error
&& ISSET(np
->n_flag
, NUPDATESIZE
)) /* update quickly to avoid multiple events */
1981 nfs_data_update_size(np
, 0);
1982 lck_mtx_lock(&nmp
->nm_lock
);
1983 np
->n_mflag
&= ~NMMONSCANINPROG
;
1984 if (np
->n_mflag
& NMMONSCANWANT
) {
1985 np
->n_mflag
&= ~NMMONSCANWANT
;
1986 wakeup(&np
->n_mflag
);
1988 if (error
|| !(nmp
->nm_sockflags
& NMSOCK_READY
) ||
1989 (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
|NFSSTA_FORCE
|NFSSTA_DEAD
)))
1992 nmp
->nm_state
&= ~NFSSTA_MONITOR_SCAN
;
1993 if (nmp
->nm_state
& NFSSTA_UNMOUNTING
)
1994 wakeup(&nmp
->nm_state
); /* let unmounting thread know scan is done */
1996 if ((nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
))) {
1997 if (nmp
->nm_deadto_start
|| !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
1998 (nmp
->nm_state
& NFSSTA_RECOVER
))
2002 msleep(&nmp
->nm_sockthd
, &nmp
->nm_lock
, PSOCK
, "nfssockthread", &ts
);
2006 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2007 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
2008 (nmp
->nm_state
& NFSSTA_MOUNTED
) && NMFLAG(nmp
, CALLUMNT
) &&
2009 (nmp
->nm_vers
< NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))) {
2010 lck_mtx_unlock(&nmp
->nm_lock
);
2011 nfs3_umount_rpc(nmp
, vfs_context_kernel(),
2012 (nmp
->nm_sockflags
& NMSOCK_READY
) ? 6 : 2);
2013 lck_mtx_lock(&nmp
->nm_lock
);
2016 if (nmp
->nm_sockthd
== thd
)
2017 nmp
->nm_sockthd
= NULL
;
2018 lck_mtx_unlock(&nmp
->nm_lock
);
2019 wakeup(&nmp
->nm_sockthd
);
2020 thread_terminate(thd
);
2023 /* start or wake a mount's socket thread */
2025 nfs_mount_sock_thread_wake(struct nfsmount
*nmp
)
2027 if (nmp
->nm_sockthd
)
2028 wakeup(&nmp
->nm_sockthd
);
2029 else if (kernel_thread_start(nfs_mount_sock_thread
, nmp
, &nmp
->nm_sockthd
) == KERN_SUCCESS
)
2030 thread_deallocate(nmp
->nm_sockthd
);
/*
 * Check if we should mark the mount dead because the
 * unresponsive mount has reached the dead timeout.
 * (must be called with nmp locked)
 *
 * Returns nonzero when the mount is (or should be considered) dead,
 * zero while it is still within its allowed unresponsive window.
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
int
nfs_mount_check_dead_timeout(struct nfsmount *nmp)
{
	struct timeval now;

	/* already marked dead: nothing more to decide */
	if (nmp->nm_state & NFSSTA_DEAD)
		return (1);
	/* dead timer never started: server hasn't been unresponsive */
	if (nmp->nm_deadto_start == 0)
		return (0);
	/* re-evaluate "squishy" status; may adjust nm_curdeadtimeout */
	nfs_is_squishy(nmp);
	/* a non-positive timeout means "never declare this mount dead" */
	if (nmp->nm_curdeadtimeout <= 0)
		return (0);
	microuptime(&now);
	/* still inside the dead-timeout window? */
	if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout)
		return (0);
	return (1);
}
/*
 * Call nfs_mount_zombie to remove most of the
 * nfs state for the mount, and then ask to be forcibly unmounted.
 *
 * Assumes the nfs mount structure lock nm_lock is held.
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
void
nfs_mount_make_zombie(struct nfsmount *nmp)
{
	fsid_t fsid;

	if (!nmp)
		return;

	/* already marked dead: don't announce or signal again */
	if (nmp->nm_state & NFSSTA_DEAD)
		return;

	printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
		(nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
	/* capture the fsid before dropping the mount lock */
	fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
	/* nfs_mount_zombie may block, so the mount lock can't be held across it */
	lck_mtx_unlock(&nmp->nm_lock);
	nfs_mount_zombie(nmp, NFSSTA_DEAD);
	/* ask the VFS layer to forcibly unmount this file system */
	vfs_event_signal(&fsid, VQ_DEAD, 0);
	lck_mtx_lock(&nmp->nm_lock);
}
/*
 * NFS callback channel socket state
 *
 * One of these is allocated for each connection accepted on the NFSv4
 * callback listening socket(s); stale/dead entries are reaped by the
 * callback cleanup timer.
 */
struct nfs_callback_socket
{
	TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
	socket_t			ncbs_so;	/* the socket */
	struct sockaddr_storage		ncbs_saddr;	/* socket address */
	struct nfs_rpc_record_state	ncbs_rrs;	/* RPC record parsing state */
	time_t				ncbs_stamp;	/* last accessed at */
	uint32_t			ncbs_flags;	/* see below */
};
#define NCBSOCK_UPCALL		0x0001	/* socket upcall (receive) in progress */
#define NCBSOCK_UPCALLWANT	0x0002	/* someone is waiting for the upcall to finish */
#define NCBSOCK_DEAD		0x0004	/* unwanted/broken; cleanup timer will reap it */
/*
 * NFS callback channel state
 *
 * One listening socket for accepting socket connections from servers and
 * a list of connected sockets to handle callback requests on.
 * Mounts registered with the callback channel are assigned IDs and
 * put on a list so that the callback request handling code can match
 * the requests up with mounts.
 */
socket_t nfs4_cb_so = NULL;		/* IPv4 listening socket */
socket_t nfs4_cb_so6 = NULL;		/* IPv6 listening socket */
in_port_t nfs4_cb_port = 0;		/* port the IPv4 listener is bound to */
in_port_t nfs4_cb_port6 = 0;		/* port the IPv6 listener is bound to */
uint32_t nfs4_cb_id = 0;		/* next callback ID to assign to a mount */
uint32_t nfs4_cb_so_usecount = 0;	/* number of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks;	/* accepted callback connections */
TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts;			/* mounts registered for callbacks */

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
/*
 * Set up the callback channel for the NFS mount.
 *
 * Initializes the callback channel socket state and
 * assigns a callback ID to the mount.
 *
 * The first mount to register also creates the shared IPv4/IPv6 listening
 * sockets; subsequent mounts just take a use-count reference on them.
 *
 * NOTE(review): lines elided in the extracted source (error checks,
 * timeout values, cleanup path) were restored from upstream xnu
 * nfs_socket.c — verify against the project tree.
 */
void
nfs4_mount_callback_setup(struct nfsmount *nmp)
{
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	socket_t so = NULL;
	socket_t so6 = NULL;
	struct timeval timeo;
	int error, on = 1;
	in_port_t port;

	lck_mtx_lock(nfs_global_mutex);
	if (nfs4_cb_id == 0) {
		/* first use of the callback channel: initialize the global lists */
		TAILQ_INIT(&nfs4_cb_mounts);
		TAILQ_INIT(&nfs4_cb_socks);
		nfs4_cb_id++;
	}
	nmp->nm_cbid = nfs4_cb_id++;
	if (nmp->nm_cbid == 0)	/* skip 0 on wraparound: 0 means "no callback ID" */
		nmp->nm_cbid = nfs4_cb_id++;
	nfs4_cb_so_usecount++;
	TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);

	if (nfs4_cb_so) {
		/* listening sockets already exist; nothing more to do */
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}

	/* IPv4 */
	error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
		goto fail;
	}
	so = nfs4_cb_so;

	sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
	error = sock_bind(so, (struct sockaddr *)&sin);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
		goto fail;
	}
	error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port = ntohs(sin.sin_port);

	error = sock_listen(so, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter.  If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;	/* NOTE(review): timeout value restored from upstream — confirm */
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
	sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	error = 0;

	/* IPv6 */
	error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
		goto fail;
	}
	so6 = nfs4_cb_so6;

	sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	/* try to use specified port or same port as IPv4 */
	port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
ipv6_bind_again:
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = in6addr_any;
	sin6.sin6_port = htons(port);
	error = sock_bind(so6, (struct sockaddr *)&sin6);
	if (error) {
		if (port != nfs_callback_port) {
			/* if we simply tried to match the IPv4 port, then try any port */
			port = 0;
			goto ipv6_bind_again;
		}
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
		goto fail;
	}
	error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port6 = ntohs(sin6.sin6_port);

	error = sock_listen(so6, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter.  If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;	/* NOTE(review): timeout value restored from upstream — confirm */
	error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
	error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
	sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	error = 0;

fail:
	if (error) {
		/* something failed: tear down whatever we managed to set up */
		nfs4_cb_so = nfs4_cb_so6 = NULL;
		lck_mtx_unlock(nfs_global_mutex);
		if (so) {
			sock_shutdown(so, SHUT_RDWR);
			sock_close(so);
		}
		if (so6) {
			sock_shutdown(so6, SHUT_RDWR);
			sock_close(so6);
		}
	} else {
		lck_mtx_unlock(nfs_global_mutex);
	}
}
/*
 * Shut down the callback channel for the NFS mount.
 *
 * Clears the mount's callback ID and releases the mounts
 * reference on the callback socket.  Last reference dropped
 * will also shut down the callback socket(s).
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
void
nfs4_mount_callback_shutdown(struct nfsmount *nmp)
{
	struct nfs_callback_socket *ncbsp;
	socket_t so, so6;
	struct nfs4_cb_sock_list cb_socks;
	struct timespec ts = {1,0};

	lck_mtx_lock(nfs_global_mutex);
	TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
	/* wait for any callbacks in progress to complete */
	while (nmp->nm_cbrefs)
		msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
	nmp->nm_cbid = 0;
	if (--nfs4_cb_so_usecount) {
		/* other mounts still using the callback channel */
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	/* last user: capture/clear the listen sockets and steal the conn list */
	so = nfs4_cb_so;
	so6 = nfs4_cb_so6;
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	TAILQ_INIT(&cb_socks);
	TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
	lck_mtx_unlock(nfs_global_mutex);
	if (so) {
		sock_shutdown(so, SHUT_RDWR);
		sock_close(so);
	}
	if (so6) {
		sock_shutdown(so6, SHUT_RDWR);
		sock_close(so6);
	}
	/* tear down all accepted callback connections */
	while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
		TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
	}
}
/*
 * Check periodically for stale/unused nfs callback sockets
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
#define NFS4_CB_TIMER_PERIOD	30	/* seconds between scans */
#define NFS4_CB_IDLE_MAX	300	/* max idle seconds before a socket is reaped */
void
nfs4_callback_timer(__unused void *param0, __unused void *param1)
{
	struct nfs_callback_socket *ncbsp, *nextncbsp;
	struct timeval now;

loop:
	lck_mtx_lock(nfs_global_mutex);
	if (TAILQ_EMPTY(&nfs4_cb_socks)) {
		/* no sockets left to watch: stop the timer */
		nfs4_callback_timer_on = 0;
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	microuptime(&now);
	TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
		/* keep sockets that are alive and recently used */
		if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
		    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX)))
			continue;
		TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
		/* closing may block: drop the lock, reap, then rescan from the top */
		lck_mtx_unlock(nfs_global_mutex);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
		goto loop;
	}
	/* re-arm the timer for the next periodic scan */
	nfs4_callback_timer_on = 1;
	nfs_interval_timer_start(nfs4_callback_timer_call,
		NFS4_CB_TIMER_PERIOD * 1000);
	lck_mtx_unlock(nfs_global_mutex);
}
/*
 * Accept a new callback socket.
 *
 * Allocates per-connection state, accepts the connection with nfs4_cb_rcv
 * registered as the receive upcall, tunes the socket, and adds it to the
 * global callback socket list.  Sockets from hosts we have no mount for
 * are marked dead so the cleanup timer reaps them quickly.
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
void
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
{
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* which listener fired? determines the address family */
	if (so == nfs4_cb_so)
		ip = 4;
	else if (so == nfs4_cb_so6)
		ip = 6;
	else
		return;

	/* allocate/initialize a new nfs_callback_socket */
	MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
	if (!ncbsp) {
		log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
		return;
	}
	bzero(ncbsp, sizeof(*ncbsp));
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket */
	error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
			ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
			nfs4_cb_rcv, ncbsp, &newso);
	if (error) {
		log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
		FREE(ncbsp, M_TEMP);
		return;
	}

	/* set up the new socket */
	/* receive timeout shouldn't matter.  If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;	/* NOTE(review): timeout value restored from upstream — confirm */
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error)
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	microuptime(&now);
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr)
			continue;
		if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0)
			break;
	}
	if (!nmp) /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
			!nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	}

	lck_mtx_unlock(nfs_global_mutex);
}
/*
 * Receive mbufs from callback sockets into RPC records and process each record.
 * Detect connection has been closed and shut down.
 *
 * Only one upcall runs per socket at a time (NCBSOCK_UPCALL); a second
 * caller waits until the first finishes.
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = {1,0};
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	lck_mtx_lock(nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* still healthy: refresh the last-used timestamp */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	lck_mtx_lock(nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);
	wakeup(ncbsp);
}
2494 * Handle an NFS callback channel request.
2497 nfs4_cb_handler(struct nfs_callback_socket
*ncbsp
, mbuf_t mreq
)
2499 socket_t so
= ncbsp
->ncbs_so
;
2500 struct nfsm_chain nmreq
, nmrep
;
2501 mbuf_t mhead
= NULL
, mrest
= NULL
, m
;
2503 struct nfsmount
*nmp
;
2506 nfs_stateid stateid
;
2507 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], rbitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
, truncate
, attrbytes
;
2508 uint32_t val
, xid
, procnum
, taglen
, cbid
, numops
, op
, status
;
2509 uint32_t auth_type
, auth_len
;
2510 uint32_t numres
, *pnumres
;
2511 int error
= 0, replen
, len
;
2514 xid
= numops
= op
= status
= procnum
= taglen
= cbid
= 0;
2516 nfsm_chain_dissect_init(error
, &nmreq
, mreq
);
2517 nfsm_chain_get_32(error
, &nmreq
, xid
); // RPC XID
2518 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Call
2519 nfsm_assert(error
, (val
== RPC_CALL
), EBADRPC
);
2520 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Version
2521 nfsm_assert(error
, (val
== RPC_VER2
), ERPCMISMATCH
);
2522 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Program Number
2523 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG
), EPROGUNAVAIL
);
2524 nfsm_chain_get_32(error
, &nmreq
, val
); // NFS Callback Program Version Number
2525 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG_VERSION
), EPROGMISMATCH
);
2526 nfsm_chain_get_32(error
, &nmreq
, procnum
); // NFS Callback Procedure Number
2527 nfsm_assert(error
, (procnum
<= NFSPROC4_CB_COMPOUND
), EPROCUNAVAIL
);
2529 /* Handle authentication */
2530 /* XXX just ignore auth for now - handling kerberos may be tricky */
2531 nfsm_chain_get_32(error
, &nmreq
, auth_type
); // RPC Auth Flavor
2532 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // RPC Auth Length
2533 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2534 if (!error
&& (auth_len
> 0))
2535 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2536 nfsm_chain_adv(error
, &nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
2537 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // verifier length
2538 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2539 if (!error
&& (auth_len
> 0))
2540 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2548 case NFSPROC4_CB_NULL
:
2549 status
= NFSERR_RETVOID
;
2551 case NFSPROC4_CB_COMPOUND
:
2552 /* tag, minorversion, cb ident, numops, op array */
2553 nfsm_chain_get_32(error
, &nmreq
, taglen
); /* tag length */
2554 nfsm_assert(error
, (val
<= NFS4_OPAQUE_LIMIT
), EBADRPC
);
2556 /* start building the body of the response */
2557 nfsm_mbuf_get(error
, &mrest
, nfsm_rndup(taglen
) + 5*NFSX_UNSIGNED
);
2558 nfsm_chain_init(&nmrep
, mrest
);
2560 /* copy tag from request to response */
2561 nfsm_chain_add_32(error
, &nmrep
, taglen
); /* tag length */
2562 for (len
= (int)taglen
; !error
&& (len
> 0); len
-= NFSX_UNSIGNED
) {
2563 nfsm_chain_get_32(error
, &nmreq
, val
);
2564 nfsm_chain_add_32(error
, &nmrep
, val
);
2567 /* insert number of results placeholder */
2569 nfsm_chain_add_32(error
, &nmrep
, numres
);
2570 pnumres
= (uint32_t*)(nmrep
.nmc_ptr
- NFSX_UNSIGNED
);
2572 nfsm_chain_get_32(error
, &nmreq
, val
); /* minorversion */
2573 nfsm_assert(error
, (val
== 0), NFSERR_MINOR_VERS_MISMATCH
);
2574 nfsm_chain_get_32(error
, &nmreq
, cbid
); /* callback ID */
2575 nfsm_chain_get_32(error
, &nmreq
, numops
); /* number of operations */
2577 if ((error
== EBADRPC
) || (error
== NFSERR_MINOR_VERS_MISMATCH
))
2579 else if ((error
== ENOBUFS
) || (error
== ENOMEM
))
2580 status
= NFSERR_RESOURCE
;
2582 status
= NFSERR_SERVERFAULT
;
2584 nfsm_chain_null(&nmrep
);
2587 /* match the callback ID to a registered mount */
2588 lck_mtx_lock(nfs_global_mutex
);
2589 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2590 if (nmp
->nm_cbid
!= cbid
)
2592 /* verify socket's source address matches this mount's server address */
2595 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0)
2598 /* mark the NFS mount as busy */
2601 lck_mtx_unlock(nfs_global_mutex
);
2603 /* if no mount match, just drop socket. */
2605 nfsm_chain_null(&nmrep
);
2609 /* process ops, adding results to mrest */
2610 while (numops
> 0) {
2612 nfsm_chain_get_32(error
, &nmreq
, op
);
2616 case NFS_OP_CB_GETATTR
:
2617 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2619 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2620 bmlen
= NFS_ATTR_BITMAP_LEN
;
2621 nfsm_chain_get_bitmap(error
, &nmreq
, bitmap
, bmlen
);
2625 numops
= 0; /* don't process any more ops */
2627 /* find the node for the file handle */
2628 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2630 status
= NFSERR_BADHANDLE
;
2633 numops
= 0; /* don't process any more ops */
2636 nfsm_chain_add_32(error
, &nmrep
, op
);
2637 nfsm_chain_add_32(error
, &nmrep
, status
);
2638 if (!error
&& (status
== EBADRPC
))
2641 /* only allow returning size, change, and mtime attrs */
2642 NFS_CLEAR_ATTRIBUTES(&rbitmap
);
2644 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
2645 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_CHANGE
);
2646 attrbytes
+= 2 * NFSX_UNSIGNED
;
2648 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
2649 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_SIZE
);
2650 attrbytes
+= 2 * NFSX_UNSIGNED
;
2652 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2653 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_TIME_MODIFY
);
2654 attrbytes
+= 3 * NFSX_UNSIGNED
;
2656 nfsm_chain_add_bitmap(error
, &nmrep
, rbitmap
, NFS_ATTR_BITMAP_LEN
);
2657 nfsm_chain_add_32(error
, &nmrep
, attrbytes
);
2658 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
))
2659 nfsm_chain_add_64(error
, &nmrep
,
2660 np
->n_vattr
.nva_change
+ ((np
->n_flag
& NMODIFIED
) ? 1 : 0));
2661 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
))
2662 nfsm_chain_add_64(error
, &nmrep
, np
->n_size
);
2663 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2664 nfsm_chain_add_64(error
, &nmrep
, np
->n_vattr
.nva_timesec
[NFSTIME_MODIFY
]);
2665 nfsm_chain_add_32(error
, &nmrep
, np
->n_vattr
.nva_timensec
[NFSTIME_MODIFY
]);
2667 nfs_node_unlock(np
);
2668 vnode_put(NFSTOV(np
));
2672 * If we hit an error building the reply, we can't easily back up.
2673 * So we'll just update the status and hope the server ignores the
2677 case NFS_OP_CB_RECALL
:
2678 // (STATEID, TRUNCATE, FH) -> (STATUS)
2680 nfsm_chain_get_stateid(error
, &nmreq
, &stateid
);
2681 nfsm_chain_get_32(error
, &nmreq
, truncate
);
2682 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2686 numops
= 0; /* don't process any more ops */
2688 /* find the node for the file handle */
2689 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2691 status
= NFSERR_BADHANDLE
;
2694 numops
= 0; /* don't process any more ops */
2695 } else if (!(np
->n_openflags
& N_DELEG_MASK
) ||
2696 bcmp(&np
->n_dstateid
, &stateid
, sizeof(stateid
))) {
2697 /* delegation stateid state doesn't match */
2698 status
= NFSERR_BAD_STATEID
;
2699 numops
= 0; /* don't process any more ops */
2701 if (!status
) /* add node to recall queue, and wake socket thread */
2702 nfs4_delegation_return_enqueue(np
);
2704 nfs_node_unlock(np
);
2705 vnode_put(NFSTOV(np
));
2708 nfsm_chain_add_32(error
, &nmrep
, op
);
2709 nfsm_chain_add_32(error
, &nmrep
, status
);
2710 if (!error
&& (status
== EBADRPC
))
2713 case NFS_OP_CB_ILLEGAL
:
2715 nfsm_chain_add_32(error
, &nmrep
, NFS_OP_CB_ILLEGAL
);
2716 status
= NFSERR_OP_ILLEGAL
;
2717 nfsm_chain_add_32(error
, &nmrep
, status
);
2718 numops
= 0; /* don't process any more ops */
2724 if (!status
&& error
) {
2725 if (error
== EBADRPC
)
2727 else if ((error
== ENOBUFS
) || (error
== ENOMEM
))
2728 status
= NFSERR_RESOURCE
;
2730 status
= NFSERR_SERVERFAULT
;
2734 /* Now, set the numres field */
2735 *pnumres
= txdr_unsigned(numres
);
2736 nfsm_chain_build_done(error
, &nmrep
);
2737 nfsm_chain_null(&nmrep
);
2739 /* drop the callback reference on the mount */
2740 lck_mtx_lock(nfs_global_mutex
);
2743 wakeup(&nmp
->nm_cbrefs
);
2744 lck_mtx_unlock(nfs_global_mutex
);
2749 if (status
== EBADRPC
)
2750 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
2752 /* build reply header */
2753 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mhead
);
2754 nfsm_chain_init(&nmrep
, mhead
);
2755 nfsm_chain_add_32(error
, &nmrep
, 0); /* insert space for an RPC record mark */
2756 nfsm_chain_add_32(error
, &nmrep
, xid
);
2757 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
2758 if ((status
== ERPCMISMATCH
) || (status
& NFSERR_AUTHERR
)) {
2759 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
2760 if (status
& NFSERR_AUTHERR
) {
2761 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
2762 nfsm_chain_add_32(error
, &nmrep
, (status
& ~NFSERR_AUTHERR
));
2764 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
2765 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2766 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2770 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
2771 /* XXX RPCAUTH_NULL verifier */
2772 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
2773 nfsm_chain_add_32(error
, &nmrep
, 0);
2774 /* accepted status */
2777 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
2780 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
2781 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2782 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2785 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
2788 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
2791 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
2792 if (status
!= NFSERR_RETVOID
)
2793 nfsm_chain_add_32(error
, &nmrep
, status
);
2797 nfsm_chain_build_done(error
, &nmrep
);
2799 nfsm_chain_null(&nmrep
);
2802 error
= mbuf_setnext(nmrep
.nmc_mcur
, mrest
);
2804 printf("nfs cb: mbuf_setnext failed %d\n", error
);
2808 /* Calculate the size of the reply */
2810 for (m
= nmrep
.nmc_mhead
; m
; m
= mbuf_next(m
))
2811 replen
+= mbuf_len(m
);
2812 mbuf_pkthdr_setlen(mhead
, replen
);
2813 error
= mbuf_pkthdr_setrcvif(mhead
, NULL
);
2814 nfsm_chain_set_recmark(error
, &nmrep
, (replen
- NFSX_UNSIGNED
) | 0x80000000);
2815 nfsm_chain_null(&nmrep
);
2817 /* send the reply */
2818 bzero(&msg
, sizeof(msg
));
2819 error
= sock_sendmbuf(so
, &msg
, mhead
, 0, &sentlen
);
2821 if (!error
&& ((int)sentlen
!= replen
))
2822 error
= EWOULDBLOCK
;
2823 if (error
== EWOULDBLOCK
) /* inability to send response is considered fatal */
2827 nfsm_chain_cleanup(&nmrep
);
2839 * Initialize an nfs_rpc_record_state structure.
2842 nfs_rpc_record_state_init(struct nfs_rpc_record_state
*nrrsp
)
2844 bzero(nrrsp
, sizeof(*nrrsp
));
2845 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
2849 * Clean up an nfs_rpc_record_state structure.
2852 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state
*nrrsp
)
2854 if (nrrsp
->nrrs_m
) {
2855 mbuf_freem(nrrsp
->nrrs_m
);
2856 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
/*
 * Read the next (marked) RPC record from the socket.
 *
 * *recvp returns if any data was received.
 * *mp returns the next complete RPC record
 *
 * Operates as a resumable two-phase parser: first reads the 4-byte
 * record marker, then the fragment body; state between calls is kept
 * in *nrrsp so a partial read can continue on the next upcall.
 *
 * NOTE(review): lines elided in the extracted source were restored from
 * upstream xnu nfs_socket.c — verify against the project tree.
 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* point at the unread tail of the 4-byte marker */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
				sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen)
			break;
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft)
			continue;
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		/* high bit flags the last fragment of the record */
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m)
			break;
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* remember the new tail of the chain */
		while (mbuf_next(m))
			m = mbuf_next(m);
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return (error);
}
2952 * The NFS client send routine.
2954 * Send the given NFS request out the mount's socket.
2955 * Holds nfs_sndlock() for the duration of this call.
2957 * - check for request termination (sigintr)
2958 * - wait for reconnect, if necessary
2959 * - UDP: check the congestion window
2960 * - make a copy of the request to send
2961 * - UDP: update the congestion window
2962 * - send the request
2964 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
2965 * rexmit count is also updated if this isn't the first send.
2967 * If the send is not successful, make sure R_MUSTRESEND is set.
2968 * If this wasn't the first transmit, set R_RESENDERR.
2969 * Also, undo any UDP congestion window changes made.
2971 * If the error appears to indicate that the socket should
2972 * be reconnected, mark the socket for reconnection.
2974 * Only return errors when the request should be aborted.
2977 nfs_send(struct nfsreq
*req
, int wait
)
2979 struct nfsmount
*nmp
;
2980 struct nfs_socket
*nso
;
2981 int error
, error2
, sotype
, rexmit
, slpflag
= 0, needrecon
;
2983 struct sockaddr
*sendnam
;
2986 struct timespec ts
= { 2, 0 };
2989 error
= nfs_sndlock(req
);
2991 lck_mtx_lock(&req
->r_mtx
);
2992 req
->r_error
= error
;
2993 req
->r_flags
&= ~R_SENDING
;
2994 lck_mtx_unlock(&req
->r_mtx
);
2998 error
= nfs_sigintr(req
->r_nmp
, req
, NULL
, 0);
3001 lck_mtx_lock(&req
->r_mtx
);
3002 req
->r_error
= error
;
3003 req
->r_flags
&= ~R_SENDING
;
3004 lck_mtx_unlock(&req
->r_mtx
);
3008 sotype
= nmp
->nm_sotype
;
3011 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3012 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3014 if (((req
->r_flags
& R_SETUP
) && !(nmp
->nm_sockflags
& NMSOCK_SETUP
)) ||
3015 ((req
->r_flags
& R_RECOVER
) && !(nmp
->nm_sockflags
& NMSOCK_READY
))) {
3018 lck_mtx_lock(&req
->r_mtx
);
3019 req
->r_error
= error
;
3020 req
->r_flags
&= ~R_SENDING
;
3021 lck_mtx_unlock(&req
->r_mtx
);
3025 /* If the socket needs reconnection, do that now. */
3026 /* wait until socket is ready - unless this request is part of setup */
3027 lck_mtx_lock(&nmp
->nm_lock
);
3028 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) &&
3029 !((nmp
->nm_sockflags
& NMSOCK_SETUP
) && (req
->r_flags
& R_SETUP
))) {
3030 if (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
))
3032 lck_mtx_unlock(&nmp
->nm_lock
);
3035 lck_mtx_lock(&req
->r_mtx
);
3036 req
->r_flags
&= ~R_SENDING
;
3037 req
->r_flags
|= R_MUSTRESEND
;
3039 lck_mtx_unlock(&req
->r_mtx
);
3042 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req
->r_xid
);
3043 lck_mtx_lock(&req
->r_mtx
);
3044 req
->r_flags
&= ~R_MUSTRESEND
;
3046 lck_mtx_unlock(&req
->r_mtx
);
3047 lck_mtx_lock(&nmp
->nm_lock
);
3048 while (!(nmp
->nm_sockflags
& NMSOCK_READY
)) {
3049 /* don't bother waiting if the socket thread won't be reconnecting it */
3050 if (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) {
3054 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (nmp
->nm_reconnect_start
> 0)) {
3057 if ((now
.tv_sec
- nmp
->nm_reconnect_start
) >= 8) {
3058 /* soft mount in reconnect for a while... terminate ASAP */
3059 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
3060 req
->r_flags
|= R_SOFTTERM
;
3061 req
->r_error
= error
= ETIMEDOUT
;
3065 /* make sure socket thread is running, then wait */
3066 nfs_mount_sock_thread_wake(nmp
);
3067 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1)))
3069 msleep(req
, &nmp
->nm_lock
, slpflag
|PSOCK
, "nfsconnectwait", &ts
);
3072 lck_mtx_unlock(&nmp
->nm_lock
);
3074 lck_mtx_lock(&req
->r_mtx
);
3075 req
->r_error
= error
;
3076 req
->r_flags
&= ~R_SENDING
;
3077 lck_mtx_unlock(&req
->r_mtx
);
3083 /* note that we're using the mount's socket to do the send */
3084 nmp
->nm_state
|= NFSSTA_SENDING
; /* will be cleared by nfs_sndunlock() */
3085 lck_mtx_unlock(&nmp
->nm_lock
);
3088 lck_mtx_lock(&req
->r_mtx
);
3089 req
->r_flags
&= ~R_SENDING
;
3090 req
->r_flags
|= R_MUSTRESEND
;
3092 lck_mtx_unlock(&req
->r_mtx
);
3096 lck_mtx_lock(&req
->r_mtx
);
3097 rexmit
= (req
->r_flags
& R_SENT
);
3099 if (sotype
== SOCK_DGRAM
) {
3100 lck_mtx_lock(&nmp
->nm_lock
);
3101 if (!(req
->r_flags
& R_CWND
) && (nmp
->nm_sent
>= nmp
->nm_cwnd
)) {
3102 /* if we can't send this out yet, wait on the cwnd queue */
3103 slpflag
= (NMFLAG(nmp
, INTR
) && req
->r_thread
) ? PCATCH
: 0;
3104 lck_mtx_unlock(&nmp
->nm_lock
);
3106 req
->r_flags
&= ~R_SENDING
;
3107 req
->r_flags
|= R_MUSTRESEND
;
3108 lck_mtx_unlock(&req
->r_mtx
);
3113 lck_mtx_lock(&nmp
->nm_lock
);
3114 while (nmp
->nm_sent
>= nmp
->nm_cwnd
) {
3115 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1)))
3117 TAILQ_INSERT_TAIL(&nmp
->nm_cwndq
, req
, r_cchain
);
3118 msleep(req
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfswaitcwnd", &ts
);
3120 if ((req
->r_cchain
.tqe_next
!= NFSREQNOLIST
)) {
3121 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3122 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3125 lck_mtx_unlock(&nmp
->nm_lock
);
3129 * We update these *before* the send to avoid racing
3130 * against others who may be looking to send requests.
3133 /* first transmit */
3134 req
->r_flags
|= R_CWND
;
3135 nmp
->nm_sent
+= NFS_CWNDSCALE
;
3138 * When retransmitting, turn timing off
3139 * and divide congestion window by 2.
3141 req
->r_flags
&= ~R_TIMING
;
3143 if (nmp
->nm_cwnd
< NFS_CWNDSCALE
)
3144 nmp
->nm_cwnd
= NFS_CWNDSCALE
;
3146 lck_mtx_unlock(&nmp
->nm_lock
);
3149 req
->r_flags
&= ~R_MUSTRESEND
;
3150 lck_mtx_unlock(&req
->r_mtx
);
3152 error
= mbuf_copym(req
->r_mhead
, 0, MBUF_COPYALL
,
3153 wait
? MBUF_WAITOK
: MBUF_DONTWAIT
, &mreqcopy
);
3156 log(LOG_INFO
, "nfs_send: mbuf copy failed %d\n", error
);
3158 lck_mtx_lock(&req
->r_mtx
);
3159 req
->r_flags
&= ~R_SENDING
;
3160 req
->r_flags
|= R_MUSTRESEND
;
3162 lck_mtx_unlock(&req
->r_mtx
);
3166 bzero(&msg
, sizeof(msg
));
3167 if ((sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
) && ((sendnam
= nmp
->nm_saddr
))) {
3168 msg
.msg_name
= (caddr_t
)sendnam
;
3169 msg
.msg_namelen
= sendnam
->sa_len
;
3171 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreqcopy
, 0, &sentlen
);
3172 if (error
|| (sentlen
!= req
->r_mreqlen
)) {
3173 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
3174 req
->r_xid
, (int)sentlen
, (int)req
->r_mreqlen
, error
);
3177 if (!error
&& (sentlen
!= req
->r_mreqlen
))
3178 error
= EWOULDBLOCK
;
3179 needrecon
= ((sotype
== SOCK_STREAM
) && sentlen
&& (sentlen
!= req
->r_mreqlen
));
3181 lck_mtx_lock(&req
->r_mtx
);
3182 req
->r_flags
&= ~R_SENDING
;
3184 if (rexmit
&& (++req
->r_rexmit
> NFS_MAXREXMIT
))
3185 req
->r_rexmit
= NFS_MAXREXMIT
;
3189 req
->r_flags
&= ~R_RESENDERR
;
3191 OSAddAtomic64(1, &nfsstats
.rpcretries
);
3192 req
->r_flags
|= R_SENT
;
3193 if (req
->r_flags
& R_WAITSENT
) {
3194 req
->r_flags
&= ~R_WAITSENT
;
3198 lck_mtx_unlock(&req
->r_mtx
);
3203 req
->r_flags
|= R_MUSTRESEND
;
3205 req
->r_flags
|= R_RESENDERR
;
3206 if ((error
== EINTR
) || (error
== ERESTART
))
3207 req
->r_error
= error
;
3208 lck_mtx_unlock(&req
->r_mtx
);
3210 if (sotype
== SOCK_DGRAM
) {
3212 * Note: even though a first send may fail, we consider
3213 * the request sent for congestion window purposes.
3214 * So we don't need to undo any of the changes made above.
3217 * Socket errors ignored for connectionless sockets??
3218 * For now, ignore them all
3220 if ((error
!= EINTR
) && (error
!= ERESTART
) &&
3221 (error
!= EWOULDBLOCK
) && (error
!= EIO
) && (nso
== nmp
->nm_nso
)) {
3222 int clearerror
= 0, optlen
= sizeof(clearerror
);
3223 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &clearerror
, &optlen
);
3224 #ifdef NFS_SOCKET_DEBUGGING
3226 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
3232 /* check if it appears we should reconnect the socket */
3235 /* if send timed out, reconnect if on TCP */
3236 if (sotype
!= SOCK_STREAM
)
3250 /* case ECANCELED??? */
3254 if (needrecon
&& (nso
== nmp
->nm_nso
)) { /* mark socket as needing reconnect */
3255 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req
->r_xid
, error
);
3256 nfs_need_reconnect(nmp
);
3261 if (nfs_is_dead(error
, nmp
))
3265 * Don't log some errors:
3266 * EPIPE errors may be common with servers that drop idle connections.
3267 * EADDRNOTAVAIL may occur on network transitions.
3268 * ENOTCONN may occur under some network conditions.
3270 if ((error
== EPIPE
) || (error
== EADDRNOTAVAIL
) || (error
== ENOTCONN
))
3272 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
))
3273 log(LOG_INFO
, "nfs send error %d for server %s\n", error
,
3274 !req
->r_nmp
? "<unmounted>" :
3275 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
);
3277 /* prefer request termination error over other errors */
3278 error2
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0);
3282 /* only allow the following errors to be returned */
3283 if ((error
!= EINTR
) && (error
!= ERESTART
) && (error
!= EIO
) &&
3284 (error
!= ENXIO
) && (error
!= ETIMEDOUT
))
3286 * We got some error we don't know what do do with,
3287 * i.e., we're not reconnecting, we map it to
3288 * EIO. Presumably our send failed and we better tell
3289 * the caller so they don't wait for a reply that is
3290 * never going to come. If we are reconnecting we
3291 * return 0 and the request will be resent.
3293 error
= needrecon
? 0 : EIO
;
3298 * NFS client socket upcalls
3300 * Pull RPC replies out of an NFS mount's socket and match them
3301 * up with the pending request.
3303 * The datagram code is simple because we always get whole
3304 * messages out of the socket.
3306 * The stream code is more involved because we have to parse
3307 * the RPC records out of the stream.
3310 /* NFS client UDP socket upcall */
3312 nfs_udp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3314 struct nfsmount
*nmp
= arg
;
3315 struct nfs_socket
*nso
= nmp
->nm_nso
;
3320 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
)
3324 /* make sure we're on the current socket */
3325 if (!nso
|| (nso
->nso_so
!= so
))
3330 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
3332 nfs_request_match_reply(nmp
, m
);
3333 } while (m
&& !error
);
3335 if (error
&& (error
!= EWOULDBLOCK
)) {
3336 /* problems with the socket... mark for reconnection */
3337 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error
);
3338 nfs_need_reconnect(nmp
);
3342 /* NFS client TCP socket upcall */
3344 nfs_tcp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3346 struct nfsmount
*nmp
= arg
;
3347 struct nfs_socket
*nso
= nmp
->nm_nso
;
3348 struct nfs_rpc_record_state nrrs
;
3354 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
)
3357 /* make sure we're on the current socket */
3358 lck_mtx_lock(&nmp
->nm_lock
);
3360 if (!nso
|| (nso
->nso_so
!= so
) || (nmp
->nm_sockflags
& (NMSOCK_DISCONNECTING
))) {
3361 lck_mtx_unlock(&nmp
->nm_lock
);
3364 lck_mtx_unlock(&nmp
->nm_lock
);
3366 /* make sure this upcall should be trying to do work */
3367 lck_mtx_lock(&nso
->nso_lock
);
3368 if (nso
->nso_flags
& (NSO_UPCALL
|NSO_DISCONNECTING
|NSO_DEAD
)) {
3369 lck_mtx_unlock(&nso
->nso_lock
);
3372 nso
->nso_flags
|= NSO_UPCALL
;
3373 nrrs
= nso
->nso_rrs
;
3374 lck_mtx_unlock(&nso
->nso_lock
);
3376 /* loop while we make error-free progress */
3377 while (!error
&& recv
) {
3378 error
= nfs_rpc_record_read(so
, &nrrs
, MSG_DONTWAIT
, &recv
, &m
);
3379 if (m
) /* match completed response with request */
3380 nfs_request_match_reply(nmp
, m
);
3383 /* Update the sockets's rpc parsing state */
3384 lck_mtx_lock(&nso
->nso_lock
);
3385 nso
->nso_rrs
= nrrs
;
3386 if (nso
->nso_flags
& NSO_DISCONNECTING
)
3388 nso
->nso_flags
&= ~NSO_UPCALL
;
3389 lck_mtx_unlock(&nso
->nso_lock
);
3391 wakeup(&nso
->nso_flags
);
3393 #ifdef NFS_SOCKET_DEBUGGING
3394 if (!recv
&& (error
!= EWOULDBLOCK
))
3395 NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error
);
3397 /* note: no error and no data indicates server closed its end */
3398 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
3399 /* problems with the socket... mark for reconnection */
3400 NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error
);
3401 nfs_need_reconnect(nmp
);
3406 * "poke" a socket to try to provoke any pending errors
3409 nfs_sock_poke(struct nfsmount
*nmp
)
3417 lck_mtx_lock(&nmp
->nm_lock
);
3418 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) ||
3419 !(nmp
->nm_sockflags
& NMSOCK_READY
) || !nmp
->nm_nso
|| !nmp
->nm_nso
->nso_so
) {
3420 /* Nothing to poke */
3421 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3422 wakeup(&nmp
->nm_sockflags
);
3423 lck_mtx_unlock(&nmp
->nm_lock
);
3426 lck_mtx_unlock(&nmp
->nm_lock
);
3427 aio
.iov_base
= &dummy
;
3430 bzero(&msg
, sizeof(msg
));
3433 error
= sock_send(nmp
->nm_nso
->nso_so
, &msg
, MSG_DONTWAIT
, &len
);
3434 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error
);
3435 lck_mtx_lock(&nmp
->nm_lock
);
3436 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3437 wakeup(&nmp
->nm_sockflags
);
3438 lck_mtx_unlock(&nmp
->nm_lock
);
3439 nfs_is_dead(error
, nmp
);
3443 * Match an RPC reply with the corresponding request
3446 nfs_request_match_reply(struct nfsmount
*nmp
, mbuf_t mrep
)
3449 struct nfsm_chain nmrep
;
3450 u_int32_t reply
= 0, rxid
= 0;
3451 int error
= 0, asyncioq
, t1
;
3453 /* Get the xid and check that it is an rpc reply */
3454 nfsm_chain_dissect_init(error
, &nmrep
, mrep
);
3455 nfsm_chain_get_32(error
, &nmrep
, rxid
);
3456 nfsm_chain_get_32(error
, &nmrep
, reply
);
3457 if (error
|| (reply
!= RPC_REPLY
)) {
3458 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
3464 * Loop through the request list to match up the reply
3465 * Iff no match, just drop it.
3467 lck_mtx_lock(nfs_request_mutex
);
3468 TAILQ_FOREACH(req
, &nfs_reqq
, r_chain
) {
3469 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
)))
3471 /* looks like we have it, grab lock and double check */
3472 lck_mtx_lock(&req
->r_mtx
);
3473 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3474 lck_mtx_unlock(&req
->r_mtx
);
3478 req
->r_nmrep
= nmrep
;
3479 lck_mtx_lock(&nmp
->nm_lock
);
3480 if (nmp
->nm_sotype
== SOCK_DGRAM
) {
3482 * Update congestion window.
3483 * Do the additive increase of one rpc/rtt.
3485 FSDBG(530, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3486 if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
3488 ((NFS_CWNDSCALE
* NFS_CWNDSCALE
) +
3489 (nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
3490 if (nmp
->nm_cwnd
> NFS_MAXCWND
)
3491 nmp
->nm_cwnd
= NFS_MAXCWND
;
3493 if (req
->r_flags
& R_CWND
) {
3494 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3495 req
->r_flags
&= ~R_CWND
;
3497 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3498 /* congestion window is open, poke the cwnd queue */
3499 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3500 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3501 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3506 * Update rtt using a gain of 0.125 on the mean
3507 * and a gain of 0.25 on the deviation.
3509 if (req
->r_flags
& R_TIMING
) {
3511 * Since the timer resolution of
3512 * NFS_HZ is so course, it can often
3513 * result in r_rtt == 0. Since
3514 * r_rtt == N means that the actual
3515 * rtt is between N+dt and N+2-dt ticks,
3518 if (proct
[req
->r_procnum
] == 0)
3519 panic("nfs_request_match_reply: proct[%d] is zero", req
->r_procnum
);
3520 t1
= req
->r_rtt
+ 1;
3521 t1
-= (NFS_SRTT(req
) >> 3);
3522 NFS_SRTT(req
) += t1
;
3525 t1
-= (NFS_SDRTT(req
) >> 2);
3526 NFS_SDRTT(req
) += t1
;
3528 nmp
->nm_timeouts
= 0;
3529 lck_mtx_unlock(&nmp
->nm_lock
);
3530 /* signal anyone waiting on this request */
3532 asyncioq
= (req
->r_callback
.rcb_func
!= NULL
);
3533 if (nfs_request_using_gss(req
))
3534 nfs_gss_clnt_rpcdone(req
);
3535 lck_mtx_unlock(&req
->r_mtx
);
3536 lck_mtx_unlock(nfs_request_mutex
);
3537 /* if it's an async RPC with a callback, queue it up */
3539 nfs_asyncio_finish(req
);
3544 /* not matched to a request, so drop it. */
3545 lck_mtx_unlock(nfs_request_mutex
);
3546 OSAddAtomic64(1, &nfsstats
.rpcunexpected
);
3552 * Wait for the reply for a given request...
3553 * ...potentially resending the request if necessary.
3556 nfs_wait_reply(struct nfsreq
*req
)
3558 struct timespec ts
= { 2, 0 };
3559 int error
= 0, slpflag
, first
= 1;
3561 if (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
))
3566 lck_mtx_lock(&req
->r_mtx
);
3567 while (!req
->r_nmrep
.nmc_mhead
) {
3568 if ((error
= nfs_sigintr(req
->r_nmp
, req
, first
? NULL
: req
->r_thread
, 0)))
3570 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
)
3572 /* check if we need to resend */
3573 if (req
->r_flags
& R_MUSTRESEND
) {
3574 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3575 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
3576 req
->r_flags
|= R_SENDING
;
3577 lck_mtx_unlock(&req
->r_mtx
);
3578 if (nfs_request_using_gss(req
)) {
3580 * It's an RPCSEC_GSS request.
3581 * Can't just resend the original request
3582 * without bumping the cred sequence number.
3583 * Go back and re-build the request.
3585 lck_mtx_lock(&req
->r_mtx
);
3586 req
->r_flags
&= ~R_SENDING
;
3587 lck_mtx_unlock(&req
->r_mtx
);
3590 error
= nfs_send(req
, 1);
3591 lck_mtx_lock(&req
->r_mtx
);
3592 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3593 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
, error
);
3596 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
)
3599 /* need to poll if we're P_NOREMOTEHANG */
3600 if (nfs_noremotehang(req
->r_thread
))
3602 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitreply", &ts
);
3603 first
= slpflag
= 0;
3605 lck_mtx_unlock(&req
->r_mtx
);
3611 * An NFS request goes something like this:
3612 * (nb: always frees up mreq mbuf list)
3613 * nfs_request_create()
3614 * - allocates a request struct if one is not provided
3615 * - initial fill-in of the request struct
3616 * nfs_request_add_header()
3617 * - add the RPC header
3618 * nfs_request_send()
3619 * - link it into list
3620 * - call nfs_send() for first transmit
3621 * nfs_request_wait()
3622 * - call nfs_wait_reply() to wait for the reply
3623 * nfs_request_finish()
3624 * - break down rpc header and return with error or nfs reply
3625 * pointed to by nmrep.
3626 * nfs_request_rele()
3627 * nfs_request_destroy()
3628 * - clean up the request struct
3629 * - free the request struct if it was allocated by nfs_request_create()
3633 * Set up an NFS request struct (allocating if no request passed in).
3638 mount_t mp
, /* used only if !np */
3639 struct nfsm_chain
*nmrest
,
3643 struct nfsreq
**reqp
)
3645 struct nfsreq
*req
, *newreq
= NULL
;
3646 struct nfsmount
*nmp
;
3650 /* allocate a new NFS request structure */
3651 MALLOC_ZONE(newreq
, struct nfsreq
*, sizeof(*newreq
), M_NFSREQ
, M_WAITOK
);
3653 mbuf_freem(nmrest
->nmc_mhead
);
3654 nmrest
->nmc_mhead
= NULL
;
3660 bzero(req
, sizeof(*req
));
3662 req
->r_flags
= R_ALLOCATED
;
3664 nmp
= VFSTONFS(np
? NFSTOMP(np
) : mp
);
3665 if (nfs_mount_gone(nmp
)) {
3667 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3670 lck_mtx_lock(&nmp
->nm_lock
);
3671 if ((nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
)) &&
3672 (nmp
->nm_state
& NFSSTA_TIMEO
)) {
3673 lck_mtx_unlock(&nmp
->nm_lock
);
3674 mbuf_freem(nmrest
->nmc_mhead
);
3675 nmrest
->nmc_mhead
= NULL
;
3677 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3681 if ((nmp
->nm_vers
!= NFS_VER4
) && (procnum
>= 0) && (procnum
< NFS_NPROCS
))
3682 OSAddAtomic64(1, &nfsstats
.rpccnt
[procnum
]);
3683 if ((nmp
->nm_vers
== NFS_VER4
) && (procnum
!= NFSPROC4_COMPOUND
) && (procnum
!= NFSPROC4_NULL
))
3684 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum
);
3686 lck_mtx_init(&req
->r_mtx
, nfs_request_grp
, LCK_ATTR_NULL
);
3690 req
->r_thread
= thd
;
3692 req
->r_flags
|= R_NOINTR
;
3693 if (IS_VALID_CRED(cred
)) {
3694 kauth_cred_ref(cred
);
3697 req
->r_procnum
= procnum
;
3698 if (proct
[procnum
] > 0)
3699 req
->r_flags
|= R_TIMING
;
3700 req
->r_nmrep
.nmc_mhead
= NULL
;
3701 SLIST_INIT(&req
->r_gss_seqlist
);
3702 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3703 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3704 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3706 /* set auth flavor to use for request */
3708 req
->r_auth
= RPCAUTH_NONE
;
3709 else if (req
->r_np
&& (req
->r_np
->n_auth
!= RPCAUTH_INVALID
))
3710 req
->r_auth
= req
->r_np
->n_auth
;
3712 req
->r_auth
= nmp
->nm_auth
;
3714 lck_mtx_unlock(&nmp
->nm_lock
);
3716 /* move the request mbuf chain to the nfsreq */
3717 req
->r_mrest
= nmrest
->nmc_mhead
;
3718 nmrest
->nmc_mhead
= NULL
;
3720 req
->r_flags
|= R_INITTED
;
3728 * Clean up and free an NFS request structure.
3731 nfs_request_destroy(struct nfsreq
*req
)
3733 struct nfsmount
*nmp
;
3734 struct gss_seq
*gsp
, *ngsp
;
3735 int clearjbtimeo
= 0;
3737 if (!req
|| !(req
->r_flags
& R_INITTED
))
3740 req
->r_flags
&= ~R_INITTED
;
3741 if (req
->r_lflags
& RL_QUEUED
)
3742 nfs_reqdequeue(req
);
3744 if (req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
3746 * Still on an async I/O queue?
3747 * %%% But which one, we may be on a local iod.
3749 lck_mtx_lock(nfsiod_mutex
);
3750 if (nmp
&& req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
3751 TAILQ_REMOVE(&nmp
->nm_iodq
, req
, r_achain
);
3752 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3754 lck_mtx_unlock(nfsiod_mutex
);
3757 lck_mtx_lock(&req
->r_mtx
);
3759 lck_mtx_lock(&nmp
->nm_lock
);
3760 if (req
->r_flags
& R_CWND
) {
3761 /* Decrement the outstanding request count. */
3762 req
->r_flags
&= ~R_CWND
;
3763 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3764 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3765 /* congestion window is open, poke the cwnd queue */
3766 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3767 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3768 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3772 assert((req
->r_flags
& R_RESENDQ
) == 0);
3773 /* XXX should we just remove this conditional, we should have a reference if we're resending */
3774 if (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
) {
3775 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
3776 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3777 if (req
->r_flags
& R_RESENDQ
)
3778 req
->r_flags
&= ~R_RESENDQ
;
3780 if (req
->r_cchain
.tqe_next
!= NFSREQNOLIST
) {
3781 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3782 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3784 if (req
->r_flags
& R_JBTPRINTFMSG
) {
3785 req
->r_flags
&= ~R_JBTPRINTFMSG
;
3787 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
3789 lck_mtx_unlock(&nmp
->nm_lock
);
3791 lck_mtx_unlock(&req
->r_mtx
);
3794 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
3796 mbuf_freem(req
->r_mhead
);
3797 else if (req
->r_mrest
)
3798 mbuf_freem(req
->r_mrest
);
3799 if (req
->r_nmrep
.nmc_mhead
)
3800 mbuf_freem(req
->r_nmrep
.nmc_mhead
);
3801 if (IS_VALID_CRED(req
->r_cred
))
3802 kauth_cred_unref(&req
->r_cred
);
3803 if (nfs_request_using_gss(req
))
3804 nfs_gss_clnt_rpcdone(req
);
3805 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
)
3808 nfs_gss_clnt_ctx_unref(req
);
3809 if (req
->r_wrongsec
)
3810 FREE(req
->r_wrongsec
, M_TEMP
);
3812 nfs_mount_rele(nmp
);
3813 lck_mtx_destroy(&req
->r_mtx
, nfs_request_grp
);
3814 if (req
->r_flags
& R_ALLOCATED
)
3815 FREE_ZONE(req
, sizeof(*req
), M_NFSREQ
);
3819 nfs_request_ref(struct nfsreq
*req
, int locked
)
3822 lck_mtx_lock(&req
->r_mtx
);
3823 if (req
->r_refs
<= 0)
3824 panic("nfsreq reference error");
3827 lck_mtx_unlock(&req
->r_mtx
);
3831 nfs_request_rele(struct nfsreq
*req
)
3835 lck_mtx_lock(&req
->r_mtx
);
3836 if (req
->r_refs
<= 0)
3837 panic("nfsreq reference underflow");
3839 destroy
= (req
->r_refs
== 0);
3840 lck_mtx_unlock(&req
->r_mtx
);
3842 nfs_request_destroy(req
);
3847 * Add an (updated) RPC header with authorization to an NFS request.
3850 nfs_request_add_header(struct nfsreq
*req
)
3852 struct nfsmount
*nmp
;
3856 /* free up any previous header */
3857 if ((m
= req
->r_mhead
)) {
3858 while (m
&& (m
!= req
->r_mrest
))
3860 req
->r_mhead
= NULL
;
3864 if (nfs_mount_gone(nmp
))
3867 error
= nfsm_rpchead(req
, req
->r_mrest
, &req
->r_xid
, &req
->r_mhead
);
3871 req
->r_mreqlen
= mbuf_pkthdr_len(req
->r_mhead
);
3873 if (nfs_mount_gone(nmp
))
3875 lck_mtx_lock(&nmp
->nm_lock
);
3876 if (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
))
3877 req
->r_retry
= nmp
->nm_retry
;
3879 req
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
3880 lck_mtx_unlock(&nmp
->nm_lock
);
3887 * Queue an NFS request up and send it out.
3890 nfs_request_send(struct nfsreq
*req
, int wait
)
3892 struct nfsmount
*nmp
;
3895 lck_mtx_lock(&req
->r_mtx
);
3896 req
->r_flags
|= R_SENDING
;
3897 lck_mtx_unlock(&req
->r_mtx
);
3899 lck_mtx_lock(nfs_request_mutex
);
3902 if (nfs_mount_gone(nmp
)) {
3903 lck_mtx_unlock(nfs_request_mutex
);
3908 if (!req
->r_start
) {
3909 req
->r_start
= now
.tv_sec
;
3910 req
->r_lastmsg
= now
.tv_sec
-
3911 ((nmp
->nm_tprintf_delay
) - (nmp
->nm_tprintf_initial_delay
));
3914 OSAddAtomic64(1, &nfsstats
.rpcrequests
);
3917 * Chain request into list of outstanding requests. Be sure
3918 * to put it LAST so timer finds oldest requests first.
3919 * Make sure that the request queue timer is running
3920 * to check for possible request timeout.
3922 TAILQ_INSERT_TAIL(&nfs_reqq
, req
, r_chain
);
3923 req
->r_lflags
|= RL_QUEUED
;
3924 if (!nfs_request_timer_on
) {
3925 nfs_request_timer_on
= 1;
3926 nfs_interval_timer_start(nfs_request_timer_call
,
3929 lck_mtx_unlock(nfs_request_mutex
);
3931 /* Send the request... */
3932 return (nfs_send(req
, wait
));
3936 * Call nfs_wait_reply() to wait for the reply.
3939 nfs_request_wait(struct nfsreq
*req
)
3941 req
->r_error
= nfs_wait_reply(req
);
3945 * Finish up an NFS request by dequeueing it and
3946 * doing the initial NFS request reply processing.
3951 struct nfsm_chain
*nmrepp
,
3954 struct nfsmount
*nmp
;
3957 uint32_t verf_len
= 0;
3958 uint32_t reply_status
= 0;
3959 uint32_t rejected_status
= 0;
3960 uint32_t auth_status
= 0;
3961 uint32_t accepted_status
= 0;
3962 struct nfsm_chain nmrep
;
3963 int error
, clearjbtimeo
;
3965 error
= req
->r_error
;
3968 nmrepp
->nmc_mhead
= NULL
;
3970 /* RPC done, unlink the request. */
3971 nfs_reqdequeue(req
);
3973 mrep
= req
->r_nmrep
.nmc_mhead
;
3977 if ((req
->r_flags
& R_CWND
) && nmp
) {
3979 * Decrement the outstanding request count.
3981 req
->r_flags
&= ~R_CWND
;
3982 lck_mtx_lock(&nmp
->nm_lock
);
3983 FSDBG(273, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3984 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3985 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3986 /* congestion window is open, poke the cwnd queue */
3987 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3988 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3989 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3992 lck_mtx_unlock(&nmp
->nm_lock
);
3995 if (nfs_request_using_gss(req
)) {
3997 * If the request used an RPCSEC_GSS credential
3998 * then reset its sequence number bit in the
4001 nfs_gss_clnt_rpcdone(req
);
4004 * If we need to re-send, go back and re-build the
4005 * request based on a new sequence number.
4006 * Note that we're using the original XID.
4008 if (error
== EAGAIN
) {
4012 error
= nfs_gss_clnt_args_restore(req
); // remove any trailer mbufs
4013 req
->r_nmrep
.nmc_mhead
= NULL
;
4014 req
->r_flags
|= R_RESTART
;
4015 if (error
== ENEEDAUTH
) {
4016 req
->r_xid
= 0; // get a new XID
4024 * If there was a successful reply, make sure to mark the mount as up.
4025 * If a tprintf message was given (or if this is a timed-out soft mount)
4026 * then post a tprintf message indicating the server is alive again.
4029 if ((req
->r_flags
& R_TPRINTFMSG
) ||
4030 (nmp
&& (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) &&
4031 ((nmp
->nm_state
& (NFSSTA_TIMEO
|NFSSTA_FORCE
|NFSSTA_DEAD
)) == NFSSTA_TIMEO
)))
4032 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, "is alive again");
4034 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, NULL
);
4041 * break down the RPC header and check if ok
4043 nmrep
= req
->r_nmrep
;
4044 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
4046 if (reply_status
== RPC_MSGDENIED
) {
4047 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
4049 if (rejected_status
== RPC_MISMATCH
) {
4053 nfsm_chain_get_32(error
, &nmrep
, auth_status
);
4055 switch (auth_status
) {
4056 case RPCSEC_GSS_CREDPROBLEM
:
4057 case RPCSEC_GSS_CTXPROBLEM
:
4059 * An RPCSEC_GSS cred or context problem.
4060 * We can't use it anymore.
4061 * Restore the args, renew the context
4062 * and set up for a resend.
4064 error
= nfs_gss_clnt_args_restore(req
);
4065 if (error
&& error
!= ENEEDAUTH
)
4069 error
= nfs_gss_clnt_ctx_renew(req
);
4074 req
->r_nmrep
.nmc_mhead
= NULL
;
4075 req
->r_xid
= 0; // get a new XID
4076 req
->r_flags
|= R_RESTART
;
4085 /* Now check the verifier */
4086 nfsm_chain_get_32(error
, &nmrep
, verf_type
); // verifier flavor
4087 nfsm_chain_get_32(error
, &nmrep
, verf_len
); // verifier length
4090 switch (req
->r_auth
) {
4093 /* Any AUTH_SYS verifier is ignored */
4095 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
4096 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
4101 error
= nfs_gss_clnt_verf_get(req
, &nmrep
,
4102 verf_type
, verf_len
, &accepted_status
);
4107 switch (accepted_status
) {
4109 if (req
->r_procnum
== NFSPROC_NULL
) {
4111 * The NFS null procedure is unique,
4112 * in not returning an NFS status.
4116 nfsm_chain_get_32(error
, &nmrep
, *status
);
4120 if ((nmp
->nm_vers
!= NFS_VER2
) && (*status
== NFSERR_TRYLATER
)) {
4122 * It's a JUKEBOX error - delay and try again
4124 int delay
, slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4127 req
->r_nmrep
.nmc_mhead
= NULL
;
4128 if ((req
->r_delay
>= 30) && !(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4129 /* we're not yet completely mounted and */
4130 /* we can't complete an RPC, so we fail */
4131 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4133 error
= req
->r_error
;
4136 req
->r_delay
= !req
->r_delay
? NFS_TRYLATERDEL
: (req
->r_delay
* 2);
4137 if (req
->r_delay
> 30)
4139 if (nmp
->nm_tprintf_initial_delay
&& (req
->r_delay
>= nmp
->nm_tprintf_initial_delay
)) {
4140 if (!(req
->r_flags
& R_JBTPRINTFMSG
)) {
4141 req
->r_flags
|= R_JBTPRINTFMSG
;
4142 lck_mtx_lock(&nmp
->nm_lock
);
4144 lck_mtx_unlock(&nmp
->nm_lock
);
4146 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_JUKEBOXTIMEO
,
4147 "resource temporarily unavailable (jukebox)", 0);
4149 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (req
->r_delay
== 30) &&
4150 !(req
->r_flags
& R_NOINTR
)) {
4151 /* for soft mounts, just give up after a short while */
4152 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4154 error
= req
->r_error
;
4157 delay
= req
->r_delay
;
4158 if (req
->r_callback
.rcb_func
) {
4161 req
->r_resendtime
= now
.tv_sec
+ delay
;
4164 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
4166 tsleep(nfs_request_finish
, PSOCK
|slpflag
, "nfs_jukebox_trylater", hz
);
4168 } while (--delay
> 0);
4170 req
->r_xid
= 0; // get a new XID
4171 req
->r_flags
|= R_RESTART
;
4173 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_TRYLATER
);
4177 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4178 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4179 lck_mtx_lock(&nmp
->nm_lock
);
4181 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4182 lck_mtx_unlock(&nmp
->nm_lock
);
4183 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, "resource available again");
4186 if ((nmp
->nm_vers
>= NFS_VER4
) && (*status
== NFSERR_WRONGSEC
)) {
4188 * Hmmm... we need to try a different security flavor.
4189 * The first time a request hits this, we will allocate an array
4190 * to track flavors to try. We fill the array with the mount's
4191 * preferred flavors or the server's preferred flavors or just the
4192 * flavors we support.
4194 uint32_t srvflavors
[NX_MAX_SEC_FLAVORS
];
4197 /* Call SECINFO to try to get list of flavors from server. */
4198 srvcount
= NX_MAX_SEC_FLAVORS
;
4199 nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, req
->r_cred
, srvflavors
, &srvcount
);
4201 if (!req
->r_wrongsec
) {
4202 /* first time... set up flavor array */
4203 MALLOC(req
->r_wrongsec
, uint32_t*, NX_MAX_SEC_FLAVORS
*sizeof(uint32_t), M_TEMP
, M_WAITOK
);
4204 if (!req
->r_wrongsec
) {
4209 if (nmp
->nm_sec
.count
) { /* use the mount's preferred list of flavors */
4210 for(; i
< nmp
->nm_sec
.count
; i
++)
4211 req
->r_wrongsec
[i
] = nmp
->nm_sec
.flavors
[i
];
4212 } else if (srvcount
) { /* otherwise use the server's list of flavors */
4213 for(; i
< srvcount
; i
++)
4214 req
->r_wrongsec
[i
] = srvflavors
[i
];
4215 } else { /* otherwise, just try the flavors we support. */
4216 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5P
;
4217 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5I
;
4218 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5
;
4219 req
->r_wrongsec
[i
++] = RPCAUTH_SYS
;
4220 req
->r_wrongsec
[i
++] = RPCAUTH_NONE
;
4222 for(; i
< NX_MAX_SEC_FLAVORS
; i
++) /* invalidate any remaining slots */
4223 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4226 /* clear the current flavor from the list */
4227 for(i
=0; i
< NX_MAX_SEC_FLAVORS
; i
++)
4228 if (req
->r_wrongsec
[i
] == req
->r_auth
)
4229 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4231 /* find the next flavor to try */
4232 for(i
=0; i
< NX_MAX_SEC_FLAVORS
; i
++)
4233 if (req
->r_wrongsec
[i
] != RPCAUTH_INVALID
) {
4234 if (!srvcount
) /* no server list, just try it */
4236 /* check that it's in the server's list */
4237 for(j
=0; j
< srvcount
; j
++)
4238 if (req
->r_wrongsec
[i
] == srvflavors
[j
])
4240 if (j
< srvcount
) /* found */
4242 /* not found in server list */
4243 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4245 if (i
== NX_MAX_SEC_FLAVORS
) {
4246 /* nothing left to try! */
4251 /* retry with the next auth flavor */
4252 req
->r_auth
= req
->r_wrongsec
[i
];
4253 req
->r_xid
= 0; // get a new XID
4254 req
->r_flags
|= R_RESTART
;
4256 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_WRONGSEC
);
4259 if ((nmp
->nm_vers
>= NFS_VER4
) && req
->r_wrongsec
) {
4261 * We renegotiated security for this request; so update the
4262 * default security flavor for the associated node.
4265 req
->r_np
->n_auth
= req
->r_auth
;
4268 if (*status
== NFS_OK
) {
4270 * Successful NFS request
4273 req
->r_nmrep
.nmc_mhead
= NULL
;
4276 /* Got an NFS error of some kind */
4279 * If the File Handle was stale, invalidate the
4280 * lookup cache, just in case.
4282 if ((*status
== ESTALE
) && req
->r_np
) {
4283 cache_purge(NFSTOV(req
->r_np
));
4284 /* if monitored, also send delete event */
4285 if (vnode_ismonitored(NFSTOV(req
->r_np
)))
4286 nfs_vnode_notify(req
->r_np
, (VNODE_EVENT_ATTRIB
|VNODE_EVENT_DELETE
));
4288 if (nmp
->nm_vers
== NFS_VER2
)
4292 req
->r_nmrep
.nmc_mhead
= NULL
;
4295 case RPC_PROGUNAVAIL
:
4296 error
= EPROGUNAVAIL
;
4298 case RPC_PROGMISMATCH
:
4299 error
= ERPCMISMATCH
;
4301 case RPC_PROCUNAVAIL
:
4302 error
= EPROCUNAVAIL
;
4307 case RPC_SYSTEM_ERR
:
4313 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4314 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4315 lck_mtx_lock(&nmp
->nm_lock
);
4317 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4318 lck_mtx_unlock(&nmp
->nm_lock
);
4320 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4322 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
,
4323 (!error
&& (*status
== NFS_OK
)) ? 0xf0f0f0f0 : error
);
4328 * NFS request using a GSS/Kerberos security flavor?
4331 nfs_request_using_gss(struct nfsreq
*req
)
4333 if (!req
->r_gss_ctx
)
4335 switch (req
->r_auth
) {
4345 * Perform an NFS request synchronously.
4351 mount_t mp
, /* used only if !np */
4352 struct nfsm_chain
*nmrest
,
4355 struct nfsreq_secinfo_args
*si
,
4356 struct nfsm_chain
*nmrepp
,
4360 return nfs_request2(np
, mp
, nmrest
, procnum
,
4361 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4362 si
, 0, nmrepp
, xidp
, status
);
4368 mount_t mp
, /* used only if !np */
4369 struct nfsm_chain
*nmrest
,
4373 struct nfsreq_secinfo_args
*si
,
4375 struct nfsm_chain
*nmrepp
,
4379 struct nfsreq rq
, *req
= &rq
;
4382 if ((error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, &req
)))
4384 req
->r_flags
|= (flags
& (R_OPTMASK
| R_SOFT
));
4386 req
->r_secinfo
= *si
;
4388 FSDBG_TOP(273, R_XID32(req
->r_xid
), np
, procnum
, 0);
4391 req
->r_flags
&= ~R_RESTART
;
4392 if ((error
= nfs_request_add_header(req
)))
4396 if ((error
= nfs_request_send(req
, 1)))
4398 nfs_request_wait(req
);
4399 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4401 } while (req
->r_flags
& R_RESTART
);
4403 FSDBG_BOT(273, R_XID32(req
->r_xid
), np
, procnum
, error
);
4404 nfs_request_rele(req
);
4410 * Set up a new null proc request to exchange GSS context tokens with the
4411 * server. Associate the context that we are setting up with the request that we
4418 struct nfsm_chain
*nmrest
,
4422 struct nfs_gss_clnt_ctx
*cp
, /* Set to gss context to renew or setup */
4423 struct nfsm_chain
*nmrepp
,
4426 struct nfsreq rq
, *req
= &rq
;
4427 int error
, wait
= 1;
4429 if ((error
= nfs_request_create(NULL
, mp
, nmrest
, NFSPROC_NULL
, thd
, cred
, &req
)))
4431 req
->r_flags
|= (flags
& R_OPTMASK
);
4434 printf("nfs_request_gss request has no context\n");
4435 nfs_request_rele(req
);
4436 return (NFSERR_EAUTH
);
4438 nfs_gss_clnt_ctx_ref(req
, cp
);
4441 * Don't wait for a reply to a context destroy advisory
4442 * to avoid hanging on a dead server.
4444 if (cp
->gss_clnt_proc
== RPCSEC_GSS_DESTROY
)
4447 FSDBG_TOP(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, 0);
4450 req
->r_flags
&= ~R_RESTART
;
4451 if ((error
= nfs_request_add_header(req
)))
4454 if ((error
= nfs_request_send(req
, wait
)))
4459 nfs_request_wait(req
);
4460 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4462 } while (req
->r_flags
& R_RESTART
);
4464 FSDBG_BOT(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, error
);
4466 nfs_gss_clnt_ctx_unref(req
);
4467 nfs_request_rele(req
);
4473 * Create and start an asynchronous NFS request.
4478 mount_t mp
, /* used only if !np */
4479 struct nfsm_chain
*nmrest
,
4483 struct nfsreq_secinfo_args
*si
,
4485 struct nfsreq_cbinfo
*cb
,
4486 struct nfsreq
**reqp
)
4489 struct nfsmount
*nmp
;
4492 error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, reqp
);
4494 FSDBG(274, (req
? R_XID32(req
->r_xid
) : 0), np
, procnum
, error
);
4497 req
->r_flags
|= (flags
& R_OPTMASK
);
4498 req
->r_flags
|= R_ASYNC
;
4500 req
->r_secinfo
= *si
;
4502 req
->r_callback
= *cb
;
4503 error
= nfs_request_add_header(req
);
4505 req
->r_flags
|= R_WAITSENT
;
4506 if (req
->r_callback
.rcb_func
)
4507 nfs_request_ref(req
, 0);
4508 error
= nfs_request_send(req
, 1);
4509 lck_mtx_lock(&req
->r_mtx
);
4510 if (!error
&& !(req
->r_flags
& R_SENT
) && req
->r_callback
.rcb_func
) {
4511 /* make sure to wait until this async I/O request gets sent */
4512 int slpflag
= (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4513 struct timespec ts
= { 2, 0 };
4514 while (!(req
->r_flags
& R_SENT
)) {
4516 if ((req
->r_flags
& R_RESENDQ
) && !nfs_mount_gone(nmp
)) {
4517 lck_mtx_lock(&nmp
->nm_lock
);
4518 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4520 * It's not going to get off the resend queue if we're in recovery.
4521 * So, just take it off ourselves. We could be holding mount state
4522 * busy and thus holding up the start of recovery.
4524 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4525 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4526 if (req
->r_flags
& R_RESENDQ
)
4527 req
->r_flags
&= ~R_RESENDQ
;
4528 lck_mtx_unlock(&nmp
->nm_lock
);
4529 req
->r_flags
|= R_SENDING
;
4530 lck_mtx_unlock(&req
->r_mtx
);
4531 error
= nfs_send(req
, 1);
4532 /* Remove the R_RESENDQ reference */
4533 nfs_request_rele(req
);
4534 lck_mtx_lock(&req
->r_mtx
);
4539 lck_mtx_unlock(&nmp
->nm_lock
);
4541 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
4543 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitsent", &ts
);
4547 sent
= req
->r_flags
& R_SENT
;
4548 lck_mtx_unlock(&req
->r_mtx
);
4549 if (error
&& req
->r_callback
.rcb_func
&& !sent
) {
4550 nfs_request_rele(req
);
4553 FSDBG(274, R_XID32(req
->r_xid
), np
, procnum
, error
);
4554 if (error
|| req
->r_callback
.rcb_func
)
4555 nfs_request_rele(req
);
4561 * Wait for and finish an asynchronous NFS request.
4564 nfs_request_async_finish(
4566 struct nfsm_chain
*nmrepp
,
4570 int error
= 0, asyncio
= req
->r_callback
.rcb_func
? 1 : 0;
4571 struct nfsmount
*nmp
;
4573 lck_mtx_lock(&req
->r_mtx
);
4575 req
->r_flags
|= R_ASYNCWAIT
;
4576 while (req
->r_flags
& R_RESENDQ
) { /* wait until the request is off the resend queue */
4577 struct timespec ts
= { 2, 0 };
4579 if ((nmp
= req
->r_nmp
)) {
4580 lck_mtx_lock(&nmp
->nm_lock
);
4581 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4583 * It's not going to get off the resend queue if we're in recovery.
4584 * So, just take it off ourselves. We could be holding mount state
4585 * busy and thus holding up the start of recovery.
4587 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4588 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4589 if (req
->r_flags
& R_RESENDQ
)
4590 req
->r_flags
&= ~R_RESENDQ
;
4591 /* Remove the R_RESENDQ reference */
4592 assert(req
->r_refs
> 0);
4594 lck_mtx_unlock(&nmp
->nm_lock
);
4597 lck_mtx_unlock(&nmp
->nm_lock
);
4599 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
4601 msleep(req
, &req
->r_mtx
, PZERO
-1, "nfsresendqwait", &ts
);
4603 lck_mtx_unlock(&req
->r_mtx
);
4606 nfs_request_wait(req
);
4607 error
= nfs_request_finish(req
, nmrepp
, status
);
4610 while (!error
&& (req
->r_flags
& R_RESTART
)) {
4612 assert(req
->r_achain
.tqe_next
== NFSREQNOLIST
);
4613 lck_mtx_lock(&req
->r_mtx
);
4614 req
->r_flags
&= ~R_IOD
;
4615 if (req
->r_resendtime
) { /* send later */
4616 nfs_asyncio_resend(req
);
4617 lck_mtx_unlock(&req
->r_mtx
);
4618 return (EINPROGRESS
);
4620 lck_mtx_unlock(&req
->r_mtx
);
4623 req
->r_flags
&= ~R_RESTART
;
4624 if ((error
= nfs_request_add_header(req
)))
4626 if ((error
= nfs_request_send(req
, !asyncio
)))
4629 return (EINPROGRESS
);
4630 nfs_request_wait(req
);
4631 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4637 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, error
);
4638 nfs_request_rele(req
);
4643 * Cancel a pending asynchronous NFS request.
4646 nfs_request_async_cancel(struct nfsreq
*req
)
4648 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, 0xD1ED1E);
4649 nfs_request_rele(req
);
4653 * Flag a request as being terminated.
4656 nfs_softterm(struct nfsreq
*req
)
4658 struct nfsmount
*nmp
= req
->r_nmp
;
4659 req
->r_flags
|= R_SOFTTERM
;
4660 req
->r_error
= ETIMEDOUT
;
4661 if (!(req
->r_flags
& R_CWND
) || nfs_mount_gone(nmp
))
4663 /* update congestion window */
4664 req
->r_flags
&= ~R_CWND
;
4665 lck_mtx_lock(&nmp
->nm_lock
);
4666 FSDBG(532, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4667 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4668 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4669 /* congestion window is open, poke the cwnd queue */
4670 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4671 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4672 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4675 lck_mtx_unlock(&nmp
->nm_lock
);
4679 * Ensure req isn't in use by the timer, then dequeue it.
4682 nfs_reqdequeue(struct nfsreq
*req
)
4684 lck_mtx_lock(nfs_request_mutex
);
4685 while (req
->r_lflags
& RL_BUSY
) {
4686 req
->r_lflags
|= RL_WAITING
;
4687 msleep(&req
->r_lflags
, nfs_request_mutex
, PSOCK
, "reqdeq", NULL
);
4689 if (req
->r_lflags
& RL_QUEUED
) {
4690 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
4691 req
->r_lflags
&= ~RL_QUEUED
;
4693 lck_mtx_unlock(nfs_request_mutex
);
4697 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
4698 * free()'d out from under it.
4701 nfs_reqbusy(struct nfsreq
*req
)
4703 if (req
->r_lflags
& RL_BUSY
)
4704 panic("req locked");
4705 req
->r_lflags
|= RL_BUSY
;
4709 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
4712 nfs_reqnext(struct nfsreq
*req
)
4714 struct nfsreq
* nextreq
;
4719 * We need to get and busy the next req before signalling the
4720 * current one, otherwise wakeup() may block us and we'll race to
4721 * grab the next req.
4723 nextreq
= TAILQ_NEXT(req
, r_chain
);
4724 if (nextreq
!= NULL
)
4725 nfs_reqbusy(nextreq
);
4726 /* unbusy and signal. */
4727 req
->r_lflags
&= ~RL_BUSY
;
4728 if (req
->r_lflags
& RL_WAITING
) {
4729 req
->r_lflags
&= ~RL_WAITING
;
4730 wakeup(&req
->r_lflags
);
4736 * NFS request queue timer routine
4738 * Scan the NFS request queue for any requests that have timed out.
4740 * Alert the system of unresponsive servers.
4741 * Mark expired requests on soft mounts as terminated.
4742 * For UDP, mark/signal requests for retransmission.
4745 nfs_request_timer(__unused
void *param0
, __unused
void *param1
)
4748 struct nfsmount
*nmp
;
4749 int timeo
, maxtime
, finish_asyncio
, error
;
4751 TAILQ_HEAD(nfs_mount_pokeq
, nfsmount
) nfs_mount_poke_queue
;
4752 TAILQ_INIT(&nfs_mount_poke_queue
);
4755 lck_mtx_lock(nfs_request_mutex
);
4756 req
= TAILQ_FIRST(&nfs_reqq
);
4757 if (req
== NULL
) { /* no requests - turn timer off */
4758 nfs_request_timer_on
= 0;
4759 lck_mtx_unlock(nfs_request_mutex
);
4766 for ( ; req
!= NULL
; req
= nfs_reqnext(req
)) {
4769 NFS_SOCK_DBG("Found a request with out a mount!\n");
4772 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
)
4774 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0))) {
4775 if (req
->r_callback
.rcb_func
!= NULL
) {
4776 /* async I/O RPC needs to be finished */
4777 lck_mtx_lock(&req
->r_mtx
);
4778 req
->r_error
= error
;
4779 finish_asyncio
= !(req
->r_flags
& R_WAITSENT
);
4781 lck_mtx_unlock(&req
->r_mtx
);
4783 nfs_asyncio_finish(req
);
4788 lck_mtx_lock(&req
->r_mtx
);
4790 if (nmp
->nm_tprintf_initial_delay
&&
4791 ((req
->r_rexmit
> 2) || (req
->r_flags
& R_RESENDERR
)) &&
4792 ((req
->r_lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
)) {
4793 req
->r_lastmsg
= now
.tv_sec
;
4794 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
4795 "not responding", 1);
4796 req
->r_flags
|= R_TPRINTFMSG
;
4797 lck_mtx_lock(&nmp
->nm_lock
);
4798 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4799 lck_mtx_unlock(&nmp
->nm_lock
);
4800 /* we're not yet completely mounted and */
4801 /* we can't complete an RPC, so we fail */
4802 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4804 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
4806 lck_mtx_unlock(&req
->r_mtx
);
4808 nfs_asyncio_finish(req
);
4811 lck_mtx_unlock(&nmp
->nm_lock
);
4815 * Put a reasonable limit on the maximum timeout,
4816 * and reduce that limit when soft mounts get timeouts or are in reconnect.
4818 if (!(NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && !nfs_can_squish(nmp
))
4819 maxtime
= NFS_MAXTIMEO
;
4820 else if ((req
->r_flags
& (R_SETUP
|R_RECOVER
)) ||
4821 ((nmp
->nm_reconnect_start
<= 0) || ((now
.tv_sec
- nmp
->nm_reconnect_start
) < 8)))
4822 maxtime
= (NFS_MAXTIMEO
/ (nmp
->nm_timeouts
+1))/2;
4824 maxtime
= NFS_MINTIMEO
/4;
4827 * Check for request timeout.
4829 if (req
->r_rtt
>= 0) {
4831 lck_mtx_lock(&nmp
->nm_lock
);
4832 if (req
->r_flags
& R_RESENDERR
) {
4833 /* with resend errors, retry every few seconds */
4836 if (req
->r_procnum
== NFSPROC_NULL
&& req
->r_gss_ctx
!= NULL
)
4837 timeo
= NFS_MINIDEMTIMEO
; // gss context setup
4838 else if (NMFLAG(nmp
, DUMBTIMER
))
4839 timeo
= nmp
->nm_timeo
;
4841 timeo
= NFS_RTO(nmp
, proct
[req
->r_procnum
]);
4843 /* ensure 62.5 ms floor */
4844 while (16 * timeo
< hz
)
4846 if (nmp
->nm_timeouts
> 0)
4847 timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
4849 /* limit timeout to max */
4850 if (timeo
> maxtime
)
4852 if (req
->r_rtt
<= timeo
) {
4853 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req
->r_rtt
, timeo
);
4854 lck_mtx_unlock(&nmp
->nm_lock
);
4855 lck_mtx_unlock(&req
->r_mtx
);
4858 /* The request has timed out */
4859 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
4860 req
->r_procnum
, proct
[req
->r_procnum
],
4861 req
->r_xid
, req
->r_rtt
, timeo
, nmp
->nm_timeouts
,
4862 (now
.tv_sec
- req
->r_start
)*NFS_HZ
, maxtime
);
4863 if (nmp
->nm_timeouts
< 8)
4865 if (nfs_mount_check_dead_timeout(nmp
)) {
4866 /* Unbusy this request */
4867 req
->r_lflags
&= ~RL_BUSY
;
4868 if (req
->r_lflags
& RL_WAITING
) {
4869 req
->r_lflags
&= ~RL_WAITING
;
4870 wakeup(&req
->r_lflags
);
4872 lck_mtx_unlock(&req
->r_mtx
);
4874 /* No need to poke this mount */
4875 if (nmp
->nm_sockflags
& NMSOCK_POKE
) {
4876 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
4877 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
4879 /* Release our lock state, so we can become a zombie */
4880 lck_mtx_unlock(nfs_request_mutex
);
4883 * Note nfs_mount_make zombie(nmp) must be
4884 * called with nm_lock held. After doing some
4885 * work we release nm_lock in
4886 * nfs_make_mount_zombie with out acquiring any
4887 * other locks. (Later, in nfs_mount_zombie we
4888 * will acquire nfs_request_mutex, r_mtx,
4889 * nm_lock in that order). So we should not be
4890 * introducing deadlock here. We take a reference
4891 * on the mount so that its still there when we
4895 nfs_mount_make_zombie(nmp
);
4896 lck_mtx_unlock(&nmp
->nm_lock
);
4897 nfs_mount_rele(nmp
);
4900 * All the request for this mount have now been
4901 * removed from the request queue. Restart to
4902 * process the remaining mounts
4907 /* if it's been a few seconds, try poking the socket */
4908 if ((nmp
->nm_sotype
== SOCK_STREAM
) &&
4909 ((now
.tv_sec
- req
->r_start
) >= 3) &&
4910 !(nmp
->nm_sockflags
& (NMSOCK_POKE
|NMSOCK_UNMOUNT
)) &&
4911 (nmp
->nm_sockflags
& NMSOCK_READY
)) {
4912 nmp
->nm_sockflags
|= NMSOCK_POKE
;
4914 * We take a ref on the mount so that we know the mount will still be there
4915 * when we process the nfs_mount_poke_queue. An unmount request will block
4916 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
4917 * the reference after calling nfs_sock_poke below;
4920 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
4922 lck_mtx_unlock(&nmp
->nm_lock
);
4925 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
4926 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& (R_SETUP
|R_RECOVER
|R_SOFT
))) &&
4927 ((req
->r_rexmit
>= req
->r_retry
) || /* too many */
4928 ((now
.tv_sec
- req
->r_start
)*NFS_HZ
> maxtime
))) { /* too long */
4929 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4930 lck_mtx_lock(&nmp
->nm_lock
);
4931 if (!(nmp
->nm_state
& NFSSTA_TIMEO
)) {
4932 lck_mtx_unlock(&nmp
->nm_lock
);
4933 /* make sure we note the unresponsive server */
4934 /* (maxtime may be less than tprintf delay) */
4935 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
4936 "not responding", 1);
4937 req
->r_lastmsg
= now
.tv_sec
;
4938 req
->r_flags
|= R_TPRINTFMSG
;
4940 lck_mtx_unlock(&nmp
->nm_lock
);
4942 if (req
->r_flags
& R_NOINTR
) {
4943 /* don't terminate nointr requests on timeout */
4944 lck_mtx_unlock(&req
->r_mtx
);
4947 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
4948 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
,
4949 now
.tv_sec
- req
->r_start
);
4951 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
4953 lck_mtx_unlock(&req
->r_mtx
);
4955 nfs_asyncio_finish(req
);
4959 /* for TCP, only resend if explicitly requested */
4960 if ((nmp
->nm_sotype
== SOCK_STREAM
) && !(req
->r_flags
& R_MUSTRESEND
)) {
4961 if (++req
->r_rexmit
> NFS_MAXREXMIT
)
4962 req
->r_rexmit
= NFS_MAXREXMIT
;
4964 lck_mtx_unlock(&req
->r_mtx
);
4969 * The request needs to be (re)sent. Kick the requester to resend it.
4970 * (unless it's already marked as needing a resend)
4972 if ((req
->r_flags
& R_MUSTRESEND
) && (req
->r_rtt
== -1)) {
4973 lck_mtx_unlock(&req
->r_mtx
);
4976 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
4977 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
4978 req
->r_flags
|= R_MUSTRESEND
;
4981 if ((req
->r_flags
& (R_IOD
|R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
4982 nfs_asyncio_resend(req
);
4983 lck_mtx_unlock(&req
->r_mtx
);
4986 lck_mtx_unlock(nfs_request_mutex
);
4988 /* poke any sockets */
4989 while ((nmp
= TAILQ_FIRST(&nfs_mount_poke_queue
))) {
4990 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
4992 nfs_mount_rele(nmp
);
4995 nfs_interval_timer_start(nfs_request_timer_call
, NFS_REQUESTDELAY
);
4999 * check a thread's proc for the "noremotehang" flag.
5002 nfs_noremotehang(thread_t thd
)
5004 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5005 return (p
&& proc_noremotehang(p
));
5009 * Test for a termination condition pending on the process.
5010 * This is used to determine if we need to bail on a mount.
5011 * ETIMEDOUT is returned if there has been a soft timeout.
5012 * EINTR is returned if there is a signal pending that is not being ignored
5013 * and the mount is interruptable, or if we are a thread that is in the process
5014 * of cancellation (also SIGKILL posted).
5016 extern int sigprop
[NSIG
+1];
5018 nfs_sigintr(struct nfsmount
*nmp
, struct nfsreq
*req
, thread_t thd
, int nmplocked
)
5026 if (req
&& (req
->r_flags
& R_SOFTTERM
))
5027 return (ETIMEDOUT
); /* request has been terminated. */
5028 if (req
&& (req
->r_flags
& R_NOINTR
))
5029 thd
= NULL
; /* don't check for signal on R_NOINTR */
5032 lck_mtx_lock(&nmp
->nm_lock
);
5033 if (nmp
->nm_state
& NFSSTA_FORCE
) {
5034 /* If a force unmount is in progress then fail. */
5036 } else if (vfs_isforce(nmp
->nm_mountp
)) {
5037 /* Someone is unmounting us, go soft and mark it. */
5038 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_SOFT
);
5039 nmp
->nm_state
|= NFSSTA_FORCE
;
5042 /* Check if the mount is marked dead. */
5043 if (!error
&& (nmp
->nm_state
& NFSSTA_DEAD
))
5047 * If the mount is hung and we've requested not to hang
5048 * on remote filesystems, then bail now.
5050 if (current_proc() != kernproc
&&
5051 !error
&& (nmp
->nm_state
& NFSSTA_TIMEO
) && nfs_noremotehang(thd
))
5055 lck_mtx_unlock(&nmp
->nm_lock
);
5059 /* may not have a thread for async I/O */
5060 if (thd
== NULL
|| current_proc() == kernproc
)
5064 * Check if the process is aborted, but don't interrupt if we
5065 * were killed by a signal and this is the exiting thread which
5066 * is attempting to dump core.
5068 if (((p
= current_proc()) != kernproc
) && current_thread_aborted() &&
5069 (!(p
->p_acflag
& AXSIG
) || (p
->exit_thread
!= current_thread()) ||
5070 (p
->p_sigacts
== NULL
) ||
5071 (p
->p_sigacts
->ps_sig
< 1) || (p
->p_sigacts
->ps_sig
> NSIG
) ||
5072 !(sigprop
[p
->p_sigacts
->ps_sig
] & SA_CORE
)))
5075 /* mask off thread and process blocked signals. */
5076 if (NMFLAG(nmp
, INTR
) && ((p
= get_bsdthreadtask_info(thd
))) &&
5077 proc_pendingsignals(p
, NFSINT_SIGMASK
))
5083 * Lock a socket against others.
5084 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5085 * and also to avoid race conditions between the processes with nfs requests
5086 * in progress when a reconnect is necessary.
5089 nfs_sndlock(struct nfsreq
*req
)
5091 struct nfsmount
*nmp
= req
->r_nmp
;
5093 int error
= 0, slpflag
= 0;
5094 struct timespec ts
= { 0, 0 };
5096 if (nfs_mount_gone(nmp
))
5099 lck_mtx_lock(&nmp
->nm_lock
);
5100 statep
= &nmp
->nm_state
;
5102 if (NMFLAG(nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
))
5104 while (*statep
& NFSSTA_SNDLOCK
) {
5105 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 1)))
5107 *statep
|= NFSSTA_WANTSND
;
5108 if (nfs_noremotehang(req
->r_thread
))
5110 msleep(statep
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsndlck", &ts
);
5111 if (slpflag
== PCATCH
) {
5117 *statep
|= NFSSTA_SNDLOCK
;
5118 lck_mtx_unlock(&nmp
->nm_lock
);
5123 * Unlock the stream socket for others.
5126 nfs_sndunlock(struct nfsreq
*req
)
5128 struct nfsmount
*nmp
= req
->r_nmp
;
5129 int *statep
, wake
= 0;
5133 lck_mtx_lock(&nmp
->nm_lock
);
5134 statep
= &nmp
->nm_state
;
5135 if ((*statep
& NFSSTA_SNDLOCK
) == 0)
5136 panic("nfs sndunlock");
5137 *statep
&= ~(NFSSTA_SNDLOCK
|NFSSTA_SENDING
);
5138 if (*statep
& NFSSTA_WANTSND
) {
5139 *statep
&= ~NFSSTA_WANTSND
;
5142 lck_mtx_unlock(&nmp
->nm_lock
);
5149 struct nfsmount
*nmp
,
5151 struct sockaddr
*saddr
,
5158 struct nfsm_chain
*nmrep
)
5160 int error
= 0, on
= 1, try, sendat
= 2, soproto
, recv
, optlen
, restoreto
= 0;
5161 socket_t newso
= NULL
;
5162 struct sockaddr_storage ss
;
5163 struct timeval orig_rcvto
, orig_sndto
, tv
= { 1, 0 };
5164 mbuf_t m
, mrep
= NULL
;
5166 uint32_t rxid
= 0, reply
= 0, reply_status
, rejected_status
;
5167 uint32_t verf_type
, verf_len
, accepted_status
;
5168 size_t readlen
, sentlen
;
5169 struct nfs_rpc_record_state nrrs
;
5172 /* create socket and set options */
5173 soproto
= (sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
;
5174 if ((error
= sock_socket(saddr
->sa_family
, sotype
, soproto
, NULL
, NULL
, &newso
)))
5178 int level
= (saddr
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
5179 int optname
= (saddr
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
5180 int portrange
= IP_PORTRANGE_LOW
;
5181 error
= sock_setsockopt(newso
, level
, optname
, &portrange
, sizeof(portrange
));
5183 ss
.ss_len
= saddr
->sa_len
;
5184 ss
.ss_family
= saddr
->sa_family
;
5185 if (ss
.ss_family
== AF_INET
) {
5186 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
5187 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
5188 } else if (ss
.ss_family
== AF_INET6
) {
5189 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
5190 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
5195 error
= sock_bind(newso
, (struct sockaddr
*)&ss
);
5199 if (sotype
== SOCK_STREAM
) {
5200 # define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5203 error
= sock_connect(newso
, saddr
, MSG_DONTWAIT
);
5204 if (error
== EINPROGRESS
)
5208 while ((error
= sock_connectwait(newso
, &tv
)) == EINPROGRESS
) {
5209 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5210 if (++count
>= NFS_AUX_CONNECTION_TIMEOUT
) {
5217 if (((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
)))) ||
5218 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
)))) ||
5219 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
)))))
5223 /* make sure socket is using a one second timeout in this function */
5224 optlen
= sizeof(orig_rcvto
);
5225 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, &optlen
);
5227 optlen
= sizeof(orig_sndto
);
5228 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, &optlen
);
5231 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
));
5232 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
));
5237 if (sotype
== SOCK_STREAM
) {
5238 sendat
= 0; /* we only resend the request for UDP */
5239 nfs_rpc_record_state_init(&nrrs
);
5242 for (try=0; try < timeo
; try++) {
5243 if ((error
= nfs_sigintr(nmp
, NULL
, !try ? NULL
: thd
, 0)))
5245 if (!try || (try == sendat
)) {
5246 /* send the request (resending periodically for UDP) */
5247 if ((error
= mbuf_copym(mreq
, 0, MBUF_COPYALL
, MBUF_WAITOK
, &m
)))
5249 bzero(&msg
, sizeof(msg
));
5250 if ((sotype
== SOCK_DGRAM
) && !sock_isconnected(so
)) {
5251 msg
.msg_name
= saddr
;
5252 msg
.msg_namelen
= saddr
->sa_len
;
5254 if ((error
= sock_sendmbuf(so
, &msg
, m
, 0, &sentlen
)))
5260 /* wait for the response */
5261 if (sotype
== SOCK_STREAM
) {
5262 /* try to read (more of) record */
5263 error
= nfs_rpc_record_read(so
, &nrrs
, 0, &recv
, &mrep
);
5264 /* if we don't have the whole record yet, we'll keep trying */
5267 bzero(&msg
, sizeof(msg
));
5268 error
= sock_receivembuf(so
, &msg
, &mrep
, 0, &readlen
);
5270 if (error
== EWOULDBLOCK
)
5273 /* parse the response */
5274 nfsm_chain_dissect_init(error
, nmrep
, mrep
);
5275 nfsm_chain_get_32(error
, nmrep
, rxid
);
5276 nfsm_chain_get_32(error
, nmrep
, reply
);
5278 if ((rxid
!= xid
) || (reply
!= RPC_REPLY
))
5280 nfsm_chain_get_32(error
, nmrep
, reply_status
);
5282 if (reply_status
== RPC_MSGDENIED
) {
5283 nfsm_chain_get_32(error
, nmrep
, rejected_status
);
5285 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
5288 nfsm_chain_get_32(error
, nmrep
, verf_type
); /* verifier flavor */
5289 nfsm_chain_get_32(error
, nmrep
, verf_len
); /* verifier length */
5292 nfsm_chain_adv(error
, nmrep
, nfsm_rndup(verf_len
));
5293 nfsm_chain_get_32(error
, nmrep
, accepted_status
);
5295 switch (accepted_status
) {
5299 case RPC_PROGUNAVAIL
:
5300 error
= EPROGUNAVAIL
;
5302 case RPC_PROGMISMATCH
:
5303 error
= EPROGMISMATCH
;
5305 case RPC_PROCUNAVAIL
:
5306 error
= EPROCUNAVAIL
;
5311 case RPC_SYSTEM_ERR
:
5320 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, sizeof(tv
));
5321 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, sizeof(tv
));
5324 sock_shutdown(newso
, SHUT_RDWR
);
5333 struct nfsmount
*nmp
,
5335 struct sockaddr
*sa
,
5342 thread_t thd
= vfs_context_thread(ctx
);
5343 kauth_cred_t cred
= vfs_context_ucred(ctx
);
5344 struct sockaddr_storage ss
;
5345 struct sockaddr
*saddr
= (struct sockaddr
*)&ss
;
5346 struct nfsm_chain nmreq
, nmrep
;
5348 int error
= 0, ip
, pmprog
, pmvers
, pmproc
;
5352 char uaddr
[MAX_IPv6_STR_LEN
+16];
5354 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5355 if (saddr
->sa_family
== AF_INET
) {
5359 pmproc
= PMAPPROC_GETPORT
;
5360 } else if (saddr
->sa_family
== AF_INET6
) {
5364 pmproc
= RPCBPROC_GETVERSADDR
;
5368 nfsm_chain_null(&nmreq
);
5369 nfsm_chain_null(&nmrep
);
5372 /* send portmapper request to get port/uaddr */
5374 ((struct sockaddr_in
*)saddr
)->sin_port
= htons(PMAPPORT
);
5376 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(PMAPPORT
);
5377 nfsm_chain_build_alloc_init(error
, &nmreq
, 8*NFSX_UNSIGNED
);
5378 nfsm_chain_add_32(error
, &nmreq
, protocol
);
5379 nfsm_chain_add_32(error
, &nmreq
, vers
);
5381 nfsm_chain_add_32(error
, &nmreq
, ipproto
);
5382 nfsm_chain_add_32(error
, &nmreq
, 0);
5384 if (ipproto
== IPPROTO_TCP
)
5385 nfsm_chain_add_string(error
, &nmreq
, "tcp6", 4);
5387 nfsm_chain_add_string(error
, &nmreq
, "udp6", 4);
5388 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* uaddr */
5389 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* owner */
5391 nfsm_chain_build_done(error
, &nmreq
);
5393 error
= nfsm_rpchead2(nmp
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5394 pmprog
, pmvers
, pmproc
, RPCAUTH_SYS
, cred
, NULL
, nmreq
.nmc_mhead
,
5397 nmreq
.nmc_mhead
= NULL
;
5398 error
= nfs_aux_request(nmp
, thd
, saddr
, so
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5399 mreq
, R_XID32(xid
), 0, timeo
, &nmrep
);
5401 /* grab port from portmap response */
5403 nfsm_chain_get_32(error
, &nmrep
, port
);
5405 ((struct sockaddr_in
*)sa
)->sin_port
= htons(port
);
5407 /* get uaddr string and convert to sockaddr */
5408 nfsm_chain_get_32(error
, &nmrep
, ualen
);
5410 if (ualen
> (sizeof(uaddr
)-1))
5413 /* program is not available, just return a zero port */
5414 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5415 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(0);
5417 nfsm_chain_get_opaque(error
, &nmrep
, ualen
, uaddr
);
5419 uaddr
[ualen
] = '\0';
5420 if (!nfs_uaddr2sockaddr(uaddr
, saddr
))
5425 if ((error
== EPROGMISMATCH
) || (error
== EPROCUNAVAIL
) || (error
== EIO
) || (error
== EBADRPC
)) {
5426 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
5427 if (pmvers
== RPCBVERS4
) {
5428 /* fall back to v3 and GETADDR */
5430 pmproc
= RPCBPROC_GETADDR
;
5431 nfsm_chain_cleanup(&nmreq
);
5432 nfsm_chain_cleanup(&nmrep
);
5433 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5440 bcopy(saddr
, sa
, min(saddr
->sa_len
, sa
->sa_len
));
5443 nfsm_chain_cleanup(&nmreq
);
5444 nfsm_chain_cleanup(&nmrep
);
5449 nfs_msg(thread_t thd
,
5454 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5458 tpr
= tprintf_open(p
);
5462 tprintf(tpr
, "nfs server %s: %s, error %d\n", server
, msg
, error
);
5464 tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);
5469 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
5470 #define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */
5471 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
5472 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
5473 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
5475 uint32_t nfs_squishy_flags
= NFS_SQUISH_MOBILE_ONLY
| NFS_SQUISH_AUTOMOUNTED_ONLY
| NFS_SQUISH_QUICK
;
5476 int32_t nfs_is_mobile
;
5478 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
5479 #define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/
5482 * Could this mount be squished?
5485 nfs_can_squish(struct nfsmount
*nmp
)
5487 uint64_t flags
= vfs_flags(nmp
->nm_mountp
);
5488 int softsquish
= ((nfs_squishy_flags
& NFS_SQUISH_SOFT
) & NMFLAG(nmp
, SOFT
));
5490 if (!softsquish
&& (nfs_squishy_flags
& NFS_SQUISH_MOBILE_ONLY
) && nfs_is_mobile
== 0)
5493 if ((nfs_squishy_flags
& NFS_SQUISH_AUTOMOUNTED_ONLY
) && (flags
& MNT_AUTOMOUNTED
) == 0)
5500 * NFS mounts default to "rw,hard" - but frequently on mobile clients
5501 * the mount may become "not responding". It's desirable to be able
5502 * to unmount these dead mounts, but only if there is no risk of
5503 * losing data or crashing applications. A "squishy" NFS mount is one
5504 * that can be force unmounted with little risk of harm.
5506 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
5507 * in a squishy state iff it is allowed to be squishy and there are no
5508 * dirty pages and there are no mmapped files and there are no files
5509 * open for write. Mounts are allowed to be squishy is controlled by
5510 * the settings of the nfs_squishy_flags and its mobility state. These
5511 * flags can be set by sysctls.
5513 * If nfs_is_squishy determines that we are in a squishy state we will
5514 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
5515 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
5516 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
5517 * update the current dead timeout with the mount's nm_deadtimeout
5518 * value set at mount time.
5520 * Assumes that nm_lock is held.
5522 * Note this routine is racey, but its effects on setting the
5523 * dead timeout only have effects when we're in trouble and are likely
5524 * to stay that way. Since by default its only for automounted
5525 * volumes on mobile machines; this is a reasonable trade off between
5526 * data integrity and user experience. It can be disabled or set via
5531 nfs_is_squishy(struct nfsmount
*nmp
)
5533 mount_t mp
= nmp
->nm_mountp
;
5535 int timeo
= (nfs_squishy_flags
& NFS_SQUISH_QUICK
) ? NFS_SQUISHY_QUICKTIMEOUT
: NFS_SQUISHY_DEADTIMEOUT
;
5537 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
5538 vfs_statfs(mp
)->f_mntfromname
, nmp
->nm_curdeadtimeout
, nfs_is_mobile
);
5540 if (!nfs_can_squish(nmp
))
5543 timeo
= (nmp
->nm_deadtimeout
> timeo
) ? max(nmp
->nm_deadtimeout
/8, timeo
) : timeo
;
5544 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp
->nm_writers
, nmp
->nm_mappers
, timeo
);
5546 if (nmp
->nm_writers
== 0 && nmp
->nm_mappers
== 0) {
5547 uint64_t flags
= mp
? vfs_flags(mp
) : 0;
5551 * Walk the nfs nodes and check for dirty buffers it we're not
5552 * RDONLY and we've not already been declared as squishy since
5553 * this can be a bit expensive.
5555 if (!(flags
& MNT_RDONLY
) && !(nmp
->nm_state
& NFSSTA_SQUISHY
))
5556 squishy
= !nfs_mount_is_dirty(mp
);
5561 nmp
->nm_state
|= NFSSTA_SQUISHY
;
5563 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
5565 nmp
->nm_curdeadtimeout
= squishy
? timeo
: nmp
->nm_deadtimeout
;
5567 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp
->nm_curdeadtimeout
);
5573 * On a send operation, if we can't reach the server and we've got only one server to talk to
5574 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
5575 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
5578 nfs_is_dead(int error
, struct nfsmount
*nmp
)
5582 lck_mtx_lock(&nmp
->nm_lock
);
5583 if (nmp
->nm_state
& NFSSTA_DEAD
) {
5584 lck_mtx_unlock(&nmp
->nm_lock
);
5588 if ((error
!= ENETUNREACH
&& error
!= EHOSTUNREACH
&& error
!= EADDRNOTAVAIL
) ||
5589 !(nmp
->nm_locations
.nl_numlocs
== 1 && nmp
->nm_locations
.nl_locations
[0]->nl_servcount
== 1)) {
5590 lck_mtx_unlock(&nmp
->nm_lock
);
5594 if ((nfs_squishy_flags
& NFS_SQUISH_QUICK
) && nfs_is_squishy(nmp
)) {
5595 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
5596 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
5597 lck_mtx_unlock(&nmp
->nm_lock
);
5598 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
5599 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
5602 lck_mtx_unlock(&nmp
->nm_lock
);
5607 * If we've experienced timeouts and we're not really a
5608 * classic hard mount, then just return cached data to
5609 * the caller instead of likely hanging on an RPC.
5612 nfs_use_cache(struct nfsmount
*nmp
)
5615 *%%% We always let mobile users goto the cache,
5616 * perhaps we should not even require them to have
5619 int cache_ok
= (nfs_is_mobile
|| NMFLAG(nmp
, SOFT
) ||
5620 nfs_can_squish(nmp
) || nmp
->nm_deadtimeout
);
5622 int timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5625 * So if we have a timeout and we're not really a hard hard-mount,
5626 * return 1 to not get things out of the cache.
5629 return ((nmp
->nm_state
& timeoutmask
) && cache_ok
);
5633 * Log a message that nfs or lockd server is unresponsive. Check if we
5634 * can be squished and if we can, or that our dead timeout has
5635 * expired, and we're not holding state, set our mount as dead, remove
5636 * our mount state and ask to be unmounted. If we are holding state
5637 * we're being called from the nfs_request_timer and will soon detect
5638 * that we need to unmount.
5641 nfs_down(struct nfsmount
*nmp
, thread_t thd
, int error
, int flags
, const char *msg
, int holding_state
)
5643 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
5644 uint32_t do_vfs_signal
= 0;
5647 if (nfs_mount_gone(nmp
))
5650 lck_mtx_lock(&nmp
->nm_lock
);
5652 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5653 if (NMFLAG(nmp
, MUTEJUKEBOX
)) /* jukebox timeouts don't count as unresponsive if muted */
5654 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
5655 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
5657 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5658 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
5660 if ((flags
& NFSSTA_TIMEO
) && !(nmp
->nm_state
& NFSSTA_TIMEO
))
5661 nmp
->nm_state
|= NFSSTA_TIMEO
;
5662 if ((flags
& NFSSTA_LOCKTIMEO
) && !(nmp
->nm_state
& NFSSTA_LOCKTIMEO
))
5663 nmp
->nm_state
|= NFSSTA_LOCKTIMEO
;
5664 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && !(nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
))
5665 nmp
->nm_state
|= NFSSTA_JUKEBOXTIMEO
;
5667 unresponsive
= (nmp
->nm_state
& timeoutmask
);
5669 nfs_is_squishy(nmp
);
5671 if (unresponsive
&& (nmp
->nm_curdeadtimeout
> 0)) {
5673 if (!wasunresponsive
) {
5674 nmp
->nm_deadto_start
= now
.tv_sec
;
5675 nfs_mount_sock_thread_wake(nmp
);
5676 } else if ((now
.tv_sec
- nmp
->nm_deadto_start
) > nmp
->nm_curdeadtimeout
&& !holding_state
) {
5677 if (!(nmp
->nm_state
& NFSSTA_DEAD
))
5678 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
5679 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
5680 do_vfs_signal
= VQ_DEAD
;
5683 lck_mtx_unlock(&nmp
->nm_lock
);
5685 if (do_vfs_signal
== VQ_DEAD
&& !(nmp
->nm_state
& NFSSTA_DEAD
))
5686 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
5687 else if (softnobrowse
|| wasunresponsive
|| !unresponsive
)
5690 do_vfs_signal
= VQ_NOTRESP
;
5692 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, do_vfs_signal
, 0);
5694 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, error
);
5698 nfs_up(struct nfsmount
*nmp
, thread_t thd
, int flags
, const char *msg
)
5700 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
5703 if (nfs_mount_gone(nmp
))
5707 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, 0);
5709 lck_mtx_lock(&nmp
->nm_lock
);
5711 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5712 if (NMFLAG(nmp
, MUTEJUKEBOX
)) /* jukebox timeouts don't count as unresponsive if muted */
5713 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
5714 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
5716 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5717 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
5719 if ((flags
& NFSSTA_TIMEO
) && (nmp
->nm_state
& NFSSTA_TIMEO
))
5720 nmp
->nm_state
&= ~NFSSTA_TIMEO
;
5721 if ((flags
& NFSSTA_LOCKTIMEO
) && (nmp
->nm_state
& NFSSTA_LOCKTIMEO
))
5722 nmp
->nm_state
&= ~NFSSTA_LOCKTIMEO
;
5723 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && (nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
))
5724 nmp
->nm_state
&= ~NFSSTA_JUKEBOXTIMEO
;
5726 unresponsive
= (nmp
->nm_state
& timeoutmask
);
5728 nmp
->nm_deadto_start
= 0;
5729 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
5730 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
5731 lck_mtx_unlock(&nmp
->nm_lock
);
5736 do_vfs_signal
= (wasunresponsive
&& !unresponsive
);
5738 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_NOTRESP
, 1);
5742 #endif /* NFSCLIENT */
5747 * Generate the rpc reply header
5748 * siz arg. is used to decide if adding a cluster is worthwhile
5752 struct nfsrv_descript
*nd
,
5753 __unused
struct nfsrv_sock
*slp
,
5754 struct nfsm_chain
*nmrepp
,
5759 struct nfsm_chain nmrep
;
5762 err
= nd
->nd_repstat
;
5763 if (err
&& (nd
->nd_vers
== NFS_VER2
))
5767 * If this is a big reply, use a cluster else
5768 * try and leave leading space for the lower level headers.
5770 siz
+= RPC_REPLYSIZ
;
5771 if (siz
>= nfs_mbuf_minclsize
) {
5772 error
= mbuf_getpacket(MBUF_WAITOK
, &mrep
);
5774 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mrep
);
5777 /* unable to allocate packet */
5778 /* XXX should we keep statistics for these errors? */
5781 if (siz
< nfs_mbuf_minclsize
) {
5782 /* leave space for lower level headers */
5783 tl
= mbuf_data(mrep
);
5784 tl
+= 80/sizeof(*tl
); /* XXX max_hdr? XXX */
5785 mbuf_setdata(mrep
, tl
, 6 * NFSX_UNSIGNED
);
5787 nfsm_chain_init(&nmrep
, mrep
);
5788 nfsm_chain_add_32(error
, &nmrep
, nd
->nd_retxid
);
5789 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
5790 if (err
== ERPCMISMATCH
|| (err
& NFSERR_AUTHERR
)) {
5791 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
5792 if (err
& NFSERR_AUTHERR
) {
5793 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
5794 nfsm_chain_add_32(error
, &nmrep
, (err
& ~NFSERR_AUTHERR
));
5796 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
5797 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
5798 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
5802 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
5803 if (nd
->nd_gss_context
!= NULL
) {
5804 /* RPCSEC_GSS verifier */
5805 error
= nfs_gss_svc_verf_put(nd
, &nmrep
);
5807 nfsm_chain_add_32(error
, &nmrep
, RPC_SYSTEM_ERR
);
5811 /* RPCAUTH_NULL verifier */
5812 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
5813 nfsm_chain_add_32(error
, &nmrep
, 0);
5815 /* accepted status */
5818 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
5821 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
5822 /* XXX hard coded versions? */
5823 nfsm_chain_add_32(error
, &nmrep
, NFS_VER2
);
5824 nfsm_chain_add_32(error
, &nmrep
, NFS_VER3
);
5827 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
5830 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
5833 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
5834 if (nd
->nd_gss_context
!= NULL
)
5835 error
= nfs_gss_svc_prepare_reply(nd
, &nmrep
);
5836 if (err
!= NFSERR_RETVOID
)
5837 nfsm_chain_add_32(error
, &nmrep
,
5838 (err
? nfsrv_errmap(nd
, err
) : 0));
5844 nfsm_chain_build_done(error
, &nmrep
);
5846 /* error composing reply header */
5847 /* XXX should we keep statistics for these errors? */
5853 if ((err
!= 0) && (err
!= NFSERR_RETVOID
))
5854 OSAddAtomic64(1, &nfsstats
.srvrpc_errs
);
5859 * The nfs server send routine.
5861 * - return EINTR or ERESTART if interrupted by a signal
5862 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
5863 * - do any cleanup required by recoverable socket errors (???)
5866 nfsrv_send(struct nfsrv_sock
*slp
, mbuf_t nam
, mbuf_t top
)
5869 socket_t so
= slp
->ns_so
;
5870 struct sockaddr
*sendnam
;
5873 bzero(&msg
, sizeof(msg
));
5874 if (nam
&& !sock_isconnected(so
) && (slp
->ns_sotype
!= SOCK_STREAM
)) {
5875 if ((sendnam
= mbuf_data(nam
))) {
5876 msg
.msg_name
= (caddr_t
)sendnam
;
5877 msg
.msg_namelen
= sendnam
->sa_len
;
5880 error
= sock_sendmbuf(so
, &msg
, top
, 0, NULL
);
5883 log(LOG_INFO
, "nfsd send error %d\n", error
);
5885 if ((error
== EWOULDBLOCK
) && (slp
->ns_sotype
== SOCK_STREAM
))
5886 error
= EPIPE
; /* zap TCP sockets if they time out on send */
5888 /* Handle any recoverable (soft) socket errors here. (???) */
5889 if (error
!= EINTR
&& error
!= ERESTART
&& error
!= EIO
&&
5890 error
!= EWOULDBLOCK
&& error
!= EPIPE
)
5897 * Socket upcall routine for the nfsd sockets.
5898 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
5899 * Essentially do as much as possible non-blocking, else punt and it will
5900 * be called with MBUF_WAITOK from an nfsd.
5903 nfsrv_rcv(socket_t so
, void *arg
, int waitflag
)
5905 struct nfsrv_sock
*slp
= arg
;
5907 if (!nfsd_thread_count
|| !(slp
->ns_flag
& SLP_VALID
))
5910 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
5911 nfsrv_rcv_locked(so
, slp
, waitflag
);
5912 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
5915 nfsrv_rcv_locked(socket_t so
, struct nfsrv_sock
*slp
, int waitflag
)
5917 mbuf_t m
, mp
, mhck
, m2
;
5918 int ns_flag
=0, error
;
5922 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
5923 if (waitflag
== MBUF_DONTWAIT
)
5924 lck_rw_done(&slp
->ns_rwlock
);
5930 * Define this to test for nfsds handling this under heavy load.
5932 if (waitflag
== MBUF_DONTWAIT
) {
5933 ns_flag
= SLP_NEEDQ
;
5937 if (slp
->ns_sotype
== SOCK_STREAM
) {
5939 * If there are already records on the queue, defer soreceive()
5940 * to an(other) nfsd so that there is feedback to the TCP layer that
5941 * the nfs servers are heavily loaded.
5944 ns_flag
= SLP_NEEDQ
;
5951 bytes_read
= 1000000000;
5952 error
= sock_receivembuf(so
, NULL
, &mp
, MSG_DONTWAIT
, &bytes_read
);
5953 if (error
|| mp
== NULL
) {
5954 if (error
== EWOULDBLOCK
)
5955 ns_flag
= (waitflag
== MBUF_DONTWAIT
) ? SLP_NEEDQ
: 0;
5957 ns_flag
= SLP_DISCONN
;
5961 if (slp
->ns_rawend
) {
5962 if ((error
= mbuf_setnext(slp
->ns_rawend
, m
)))
5963 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error
);
5964 slp
->ns_cc
+= bytes_read
;
5967 slp
->ns_cc
= bytes_read
;
5969 while ((m2
= mbuf_next(m
)))
5974 * Now try and parse record(s) out of the raw stream data.
5976 error
= nfsrv_getstream(slp
, waitflag
);
5979 ns_flag
= SLP_DISCONN
;
5981 ns_flag
= SLP_NEEDQ
;
5984 struct sockaddr_storage nam
;
5986 if (slp
->ns_reccnt
>= nfsrv_sock_max_rec_queue_length
) {
5987 /* already have max # RPC records queued on this socket */
5988 ns_flag
= SLP_NEEDQ
;
5992 bzero(&msg
, sizeof(msg
));
5993 msg
.msg_name
= (caddr_t
)&nam
;
5994 msg
.msg_namelen
= sizeof(nam
);
5997 bytes_read
= 1000000000;
5998 error
= sock_receivembuf(so
, &msg
, &mp
, MSG_DONTWAIT
| MSG_NEEDSA
, &bytes_read
);
6000 if (msg
.msg_name
&& (mbuf_get(MBUF_WAITOK
, MBUF_TYPE_SONAME
, &mhck
) == 0)) {
6001 mbuf_setlen(mhck
, nam
.ss_len
);
6002 bcopy(&nam
, mbuf_data(mhck
), nam
.ss_len
);
6004 if (mbuf_setnext(m
, mp
)) {
6005 /* trouble... just drop it */
6006 printf("nfsrv_rcv: mbuf_setnext failed\n");
6014 mbuf_setnextpkt(slp
->ns_recend
, m
);
6017 slp
->ns_flag
|= SLP_DOREC
;
6020 mbuf_setnextpkt(m
, NULL
);
6027 * Now try and process the request records, non-blocking.
6031 slp
->ns_flag
|= ns_flag
;
6032 if (waitflag
== MBUF_DONTWAIT
) {
6033 int wake
= (slp
->ns_flag
& SLP_WORKTODO
);
6034 lck_rw_done(&slp
->ns_rwlock
);
6035 if (wake
&& nfsd_thread_count
) {
6036 lck_mtx_lock(nfsd_mutex
);
6037 nfsrv_wakenfsd(slp
);
6038 lck_mtx_unlock(nfsd_mutex
);
6044 * Try and extract an RPC request from the mbuf data list received on a
6045 * stream socket. The "waitflag" argument indicates whether or not it
6049 nfsrv_getstream(struct nfsrv_sock
*slp
, int waitflag
)
6052 char *cp1
, *cp2
, *mdata
;
6053 int len
, mlen
, error
;
6054 mbuf_t om
, m2
, recm
;
6057 if (slp
->ns_flag
& SLP_GETSTREAM
)
6058 panic("nfs getstream");
6059 slp
->ns_flag
|= SLP_GETSTREAM
;
6061 if (slp
->ns_reclen
== 0) {
6062 if (slp
->ns_cc
< NFSX_UNSIGNED
) {
6063 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6067 mdata
= mbuf_data(m
);
6069 if (mlen
>= NFSX_UNSIGNED
) {
6070 bcopy(mdata
, (caddr_t
)&recmark
, NFSX_UNSIGNED
);
6071 mdata
+= NFSX_UNSIGNED
;
6072 mlen
-= NFSX_UNSIGNED
;
6073 mbuf_setdata(m
, mdata
, mlen
);
6075 cp1
= (caddr_t
)&recmark
;
6077 while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
6085 mbuf_setdata(m
, cp2
, mlen
);
6088 slp
->ns_cc
-= NFSX_UNSIGNED
;
6089 recmark
= ntohl(recmark
);
6090 slp
->ns_reclen
= recmark
& ~0x80000000;
6091 if (recmark
& 0x80000000)
6092 slp
->ns_flag
|= SLP_LASTFRAG
;
6094 slp
->ns_flag
&= ~SLP_LASTFRAG
;
6095 if (slp
->ns_reclen
<= 0 || slp
->ns_reclen
> NFS_MAXPACKET
) {
6096 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6102 * Now get the record part.
6104 * Note that slp->ns_reclen may be 0. Linux sometimes
6105 * generates 0-length RPCs
6108 if (slp
->ns_cc
== slp
->ns_reclen
) {
6110 slp
->ns_raw
= slp
->ns_rawend
= NULL
;
6111 slp
->ns_cc
= slp
->ns_reclen
= 0;
6112 } else if (slp
->ns_cc
> slp
->ns_reclen
) {
6116 mdata
= mbuf_data(m
);
6118 while (len
< slp
->ns_reclen
) {
6119 if ((len
+ mlen
) > slp
->ns_reclen
) {
6120 if (mbuf_copym(m
, 0, slp
->ns_reclen
- len
, waitflag
, &m2
)) {
6121 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6122 return (EWOULDBLOCK
);
6125 if (mbuf_setnext(om
, m2
)) {
6126 /* trouble... just drop it */
6127 printf("nfsrv_getstream: mbuf_setnext failed\n");
6129 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6130 return (EWOULDBLOCK
);
6136 mdata
+= slp
->ns_reclen
- len
;
6137 mlen
-= slp
->ns_reclen
- len
;
6138 mbuf_setdata(m
, mdata
, mlen
);
6139 len
= slp
->ns_reclen
;
6140 } else if ((len
+ mlen
) == slp
->ns_reclen
) {
6145 if (mbuf_setnext(om
, NULL
)) {
6146 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
6147 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6148 return (EWOULDBLOCK
);
6151 mdata
= mbuf_data(m
);
6157 mdata
= mbuf_data(m
);
6164 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6169 * Accumulate the fragments into a record.
6171 if (slp
->ns_frag
== NULL
) {
6172 slp
->ns_frag
= recm
;
6175 while ((m2
= mbuf_next(m
)))
6177 if ((error
= mbuf_setnext(m
, recm
)))
6178 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error
);
6180 if (slp
->ns_flag
& SLP_LASTFRAG
) {
6182 mbuf_setnextpkt(slp
->ns_recend
, slp
->ns_frag
);
6184 slp
->ns_rec
= slp
->ns_frag
;
6185 slp
->ns_flag
|= SLP_DOREC
;
6187 slp
->ns_recend
= slp
->ns_frag
;
6188 slp
->ns_frag
= NULL
;
6194 * Parse an RPC header.
6198 struct nfsrv_sock
*slp
,
6200 struct nfsrv_descript
**ndp
)
6204 struct nfsrv_descript
*nd
;
6208 if (!(slp
->ns_flag
& (SLP_VALID
|SLP_DOREC
)) || (slp
->ns_rec
== NULL
))
6210 MALLOC_ZONE(nd
, struct nfsrv_descript
*,
6211 sizeof (struct nfsrv_descript
), M_NFSRVDESC
, M_WAITOK
);
6215 slp
->ns_rec
= mbuf_nextpkt(m
);
6217 mbuf_setnextpkt(m
, NULL
);
6219 slp
->ns_flag
&= ~SLP_DOREC
;
6220 slp
->ns_recend
= NULL
;
6223 if (mbuf_type(m
) == MBUF_TYPE_SONAME
) {
6226 if ((error
= mbuf_setnext(nam
, NULL
)))
6227 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error
);
6231 nfsm_chain_dissect_init(error
, &nd
->nd_nmreq
, m
);
6233 error
= nfsrv_getreq(nd
);
6237 if (nd
->nd_gss_context
)
6238 nfs_gss_svc_ctx_deref(nd
->nd_gss_context
);
6239 FREE_ZONE(nd
, sizeof(*nd
), M_NFSRVDESC
);
6249 * Parse an RPC request
6251 * - fill in the cred struct.
6254 nfsrv_getreq(struct nfsrv_descript
*nd
)
6256 struct nfsm_chain
*nmreq
;
6258 u_int32_t nfsvers
, auth_type
;
6266 nd
->nd_gss_context
= NULL
;
6267 nd
->nd_gss_seqnum
= 0;
6268 nd
->nd_gss_mb
= NULL
;
6270 user_id
= group_id
= -2;
6271 val
= auth_type
= len
= 0;
6273 nmreq
= &nd
->nd_nmreq
;
6274 nfsm_chain_get_32(error
, nmreq
, nd
->nd_retxid
); // XID
6275 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Call
6276 if (!error
&& (val
!= RPC_CALL
))
6280 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Version
6282 if (val
!= RPC_VER2
) {
6283 nd
->nd_repstat
= ERPCMISMATCH
;
6284 nd
->nd_procnum
= NFSPROC_NOOP
;
6287 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Program Number
6289 if (val
!= NFS_PROG
) {
6290 nd
->nd_repstat
= EPROGUNAVAIL
;
6291 nd
->nd_procnum
= NFSPROC_NOOP
;
6294 nfsm_chain_get_32(error
, nmreq
, nfsvers
);// NFS Version Number
6296 if ((nfsvers
< NFS_VER2
) || (nfsvers
> NFS_VER3
)) {
6297 nd
->nd_repstat
= EPROGMISMATCH
;
6298 nd
->nd_procnum
= NFSPROC_NOOP
;
6301 nd
->nd_vers
= nfsvers
;
6302 nfsm_chain_get_32(error
, nmreq
, nd
->nd_procnum
);// NFS Procedure Number
6304 if ((nd
->nd_procnum
>= NFS_NPROCS
) ||
6305 ((nd
->nd_vers
== NFS_VER2
) && (nd
->nd_procnum
> NFSV2PROC_STATFS
))) {
6306 nd
->nd_repstat
= EPROCUNAVAIL
;
6307 nd
->nd_procnum
= NFSPROC_NOOP
;
6310 if (nfsvers
!= NFS_VER3
)
6311 nd
->nd_procnum
= nfsv3_procid
[nd
->nd_procnum
];
6312 nfsm_chain_get_32(error
, nmreq
, auth_type
); // Auth Flavor
6313 nfsm_chain_get_32(error
, nmreq
, len
); // Auth Length
6314 if (!error
&& (len
< 0 || len
> RPCAUTH_MAXSIZ
))
6318 /* Handle authentication */
6319 if (auth_type
== RPCAUTH_SYS
) {
6320 struct posix_cred temp_pcred
;
6321 if (nd
->nd_procnum
== NFSPROC_NULL
)
6323 nd
->nd_sec
= RPCAUTH_SYS
;
6324 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // skip stamp
6325 nfsm_chain_get_32(error
, nmreq
, len
); // hostname length
6326 if (len
< 0 || len
> NFS_MAXNAMLEN
)
6328 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
)); // skip hostname
6331 /* create a temporary credential using the bits from the wire */
6332 bzero(&temp_pcred
, sizeof(temp_pcred
));
6333 nfsm_chain_get_32(error
, nmreq
, user_id
);
6334 nfsm_chain_get_32(error
, nmreq
, group_id
);
6335 temp_pcred
.cr_groups
[0] = group_id
;
6336 nfsm_chain_get_32(error
, nmreq
, len
); // extra GID count
6337 if ((len
< 0) || (len
> RPCAUTH_UNIXGIDS
))
6340 for (i
= 1; i
<= len
; i
++)
6342 nfsm_chain_get_32(error
, nmreq
, temp_pcred
.cr_groups
[i
]);
6344 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
);
6346 ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (len
+ 1);
6348 nfsrv_group_sort(&temp_pcred
.cr_groups
[0], ngroups
);
6349 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
6350 nfsm_chain_get_32(error
, nmreq
, len
); // verifier length
6351 if (len
< 0 || len
> RPCAUTH_MAXSIZ
)
6354 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
));
6356 /* request creation of a real credential */
6357 temp_pcred
.cr_uid
= user_id
;
6358 temp_pcred
.cr_ngroups
= ngroups
;
6359 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
6360 if (nd
->nd_cr
== NULL
) {
6361 nd
->nd_repstat
= ENOMEM
;
6362 nd
->nd_procnum
= NFSPROC_NOOP
;
6365 } else if (auth_type
== RPCSEC_GSS
) {
6366 error
= nfs_gss_svc_cred_get(nd
, nmreq
);
6368 if (error
== EINVAL
)
6369 goto nfsmout
; // drop the request
6370 nd
->nd_repstat
= error
;
6371 nd
->nd_procnum
= NFSPROC_NOOP
;
6375 if (nd
->nd_procnum
== NFSPROC_NULL
) // assume it's AUTH_NONE
6377 nd
->nd_repstat
= (NFSERR_AUTHERR
| AUTH_REJECTCRED
);
6378 nd
->nd_procnum
= NFSPROC_NOOP
;
6383 if (IS_VALID_CRED(nd
->nd_cr
))
6384 kauth_cred_unref(&nd
->nd_cr
);
6385 nfsm_chain_cleanup(nmreq
);
6390 * Search for a sleeping nfsd and wake it up.
6391 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
6392 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
6393 * Note: Must be called with nfsd_mutex held.
6396 nfsrv_wakenfsd(struct nfsrv_sock
*slp
)
6400 if ((slp
->ns_flag
& SLP_VALID
) == 0)
6403 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
6404 /* if there's work to do on this socket, make sure it's queued up */
6405 if ((slp
->ns_flag
& SLP_WORKTODO
) && !(slp
->ns_flag
& SLP_QUEUED
)) {
6406 TAILQ_INSERT_TAIL(&nfsrv_sockwait
, slp
, ns_svcq
);
6407 slp
->ns_flag
|= SLP_WAITQ
;
6409 lck_rw_done(&slp
->ns_rwlock
);
6411 /* wake up a waiting nfsd, if possible */
6412 nd
= TAILQ_FIRST(&nfsd_queue
);
6416 TAILQ_REMOVE(&nfsd_queue
, nd
, nfsd_queue
);
6417 nd
->nfsd_flag
&= ~NFSD_WAITING
;
6421 #endif /* NFSSERVER */