2 * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
69 * Socket operations for use by nfs
72 #include <sys/param.h>
73 #include <sys/systm.h>
75 #include <sys/signalvar.h>
76 #include <sys/kauth.h>
77 #include <sys/mount_internal.h>
78 #include <sys/kernel.h>
79 #include <sys/kpi_mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/vnode.h>
82 #include <sys/domain.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/syslog.h>
86 #include <sys/tprintf.h>
87 #include <libkern/OSAtomic.h>
90 #include <kern/clock.h>
91 #include <kern/task.h>
92 #include <kern/thread.h>
93 #include <kern/thread_call.h>
97 #include <netinet/in.h>
98 #include <netinet/tcp.h>
100 #include <nfs/rpcv2.h>
101 #include <nfs/krpc.h>
102 #include <nfs/nfsproto.h>
104 #include <nfs/xdr_subs.h>
105 #include <nfs/nfsm_subs.h>
106 #include <nfs/nfs_gss.h>
107 #include <nfs/nfsmount.h>
108 #include <nfs/nfsnode.h>
110 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
113 boolean_t
current_thread_aborted(void);
114 kern_return_t
thread_terminate(thread_t
);
118 int nfsrv_sock_max_rec_queue_length
= 128; /* max # RPC records queued on (UDP) socket */
120 int nfsrv_getstream(struct nfsrv_sock
*, int);
121 int nfsrv_getreq(struct nfsrv_descript
*);
122 extern int nfsv3_procid
[NFS_NPROCS
];
123 #endif /* NFSSERVER */
126 * compare two sockaddr structures
129 nfs_sockaddr_cmp(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
137 if (sa1
->sa_family
!= sa2
->sa_family
) {
138 return (sa1
->sa_family
< sa2
->sa_family
) ? -1 : 1;
140 if (sa1
->sa_len
!= sa2
->sa_len
) {
141 return (sa1
->sa_len
< sa2
->sa_len
) ? -1 : 1;
143 if (sa1
->sa_family
== AF_INET
) {
144 return bcmp(&((struct sockaddr_in
*)sa1
)->sin_addr
,
145 &((struct sockaddr_in
*)sa2
)->sin_addr
, sizeof(((struct sockaddr_in
*)sa1
)->sin_addr
));
147 if (sa1
->sa_family
== AF_INET6
) {
148 return bcmp(&((struct sockaddr_in6
*)sa1
)->sin6_addr
,
149 &((struct sockaddr_in6
*)sa2
)->sin6_addr
, sizeof(((struct sockaddr_in6
*)sa1
)->sin6_addr
));
156 int nfs_connect_search_new_socket(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
157 int nfs_connect_search_socket_connect(struct nfsmount
*, struct nfs_socket
*, int);
158 int nfs_connect_search_ping(struct nfsmount
*, struct nfs_socket
*, struct timeval
*);
159 void nfs_connect_search_socket_found(struct nfsmount
*, struct nfs_socket_search
*, struct nfs_socket
*);
160 void nfs_connect_search_socket_reap(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
161 int nfs_connect_search_check(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
162 int nfs_reconnect(struct nfsmount
*);
163 int nfs_connect_setup(struct nfsmount
*);
164 void nfs_mount_sock_thread(void *, wait_result_t
);
165 void nfs_udp_rcv(socket_t
, void*, int);
166 void nfs_tcp_rcv(socket_t
, void*, int);
167 void nfs_sock_poke(struct nfsmount
*);
168 void nfs_request_match_reply(struct nfsmount
*, mbuf_t
);
169 void nfs_reqdequeue(struct nfsreq
*);
170 void nfs_reqbusy(struct nfsreq
*);
171 struct nfsreq
*nfs_reqnext(struct nfsreq
*);
172 int nfs_wait_reply(struct nfsreq
*);
173 void nfs_softterm(struct nfsreq
*);
174 int nfs_can_squish(struct nfsmount
*);
175 int nfs_is_squishy(struct nfsmount
*);
176 int nfs_is_dead(int, struct nfsmount
*);
179 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
180 * Use the mean and mean deviation of rtt for the appropriate type of rpc
181 * for the frequent rpcs and a default for the others.
182 * The justification for doing "other" this way is that these rpcs
183 * happen so infrequently that timer est. would probably be stale.
184 * Also, since many of these rpcs are
185 * non-idempotent, a conservative timeout is desired.
186 * getattr, lookup - A+2D
190 #define NFS_RTO(n, t) \
191 ((t) == 0 ? (n)->nm_timeo : \
193 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
194 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
195 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
196 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
199 * Defines which timer to use for the procnum.
206 static int proct
[NFS_NPROCS
] = {
207 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0
211 * There is a congestion window for outstanding rpcs maintained per mount
212 * point. The cwnd size is adjusted in roughly the way that:
213 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
214 * SIGCOMM '88". ACM, August 1988.
215 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
216 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
217 * of rpcs is in progress.
218 * (The sent count and cwnd are scaled for integer arith.)
219 * Variants of "slow start" were tried and were found to be too much of a
220 * performance hit (ave. rtt 3 times larger),
221 * I suspect due to the large rtt that nfs rpcs have.
223 #define NFS_CWNDSCALE 256
224 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
225 static int nfs_backoff
[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
228 * Increment location index to next address/server/location.
231 nfs_location_next(struct nfs_fs_locations
*nlp
, struct nfs_location_index
*nlip
)
233 uint8_t loc
= nlip
->nli_loc
;
234 uint8_t serv
= nlip
->nli_serv
;
235 uint8_t addr
= nlip
->nli_addr
;
237 /* move to next address */
239 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
240 /* no more addresses on current server, go to first address of next server */
244 if (serv
>= nlp
->nl_locations
[loc
]->nl_servcount
) {
245 /* no more servers on current location, go to first server of next location */
248 if (loc
>= nlp
->nl_numlocs
) {
249 loc
= 0; /* after last location, wrap back around to first location */
254 * It's possible for this next server to not have any addresses.
255 * Check for that here and go to the next server.
256 * But bail out if we've managed to come back around to the original
257 * location that was passed in. (That would mean no servers had any
258 * addresses. And we don't want to spin here forever.)
260 if ((loc
== nlip
->nli_loc
) && (serv
== nlip
->nli_serv
) && (addr
== nlip
->nli_addr
)) {
263 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
268 nlip
->nli_serv
= serv
;
269 nlip
->nli_addr
= addr
;
273 * Compare two location indices.
276 nfs_location_index_cmp(struct nfs_location_index
*nlip1
, struct nfs_location_index
*nlip2
)
278 if (nlip1
->nli_loc
!= nlip2
->nli_loc
) {
279 return nlip1
->nli_loc
- nlip2
->nli_loc
;
281 if (nlip1
->nli_serv
!= nlip2
->nli_serv
) {
282 return nlip1
->nli_serv
- nlip2
->nli_serv
;
284 return nlip1
->nli_addr
- nlip2
->nli_addr
;
288 * Get the mntfromname (or path portion only) for a given location.
291 nfs_location_mntfromname(struct nfs_fs_locations
*locs
, struct nfs_location_index idx
, char *s
, int size
, int pathonly
)
293 struct nfs_fs_location
*fsl
= locs
->nl_locations
[idx
.nli_loc
];
299 cnt
= snprintf(p
, size
, "%s:", fsl
->nl_servers
[idx
.nli_serv
]->ns_name
);
303 if (fsl
->nl_path
.np_compcount
== 0) {
304 /* mounting root export on server */
311 /* append each server path component */
312 for (i
= 0; (size
> 0) && (i
< (int)fsl
->nl_path
.np_compcount
); i
++) {
313 cnt
= snprintf(p
, size
, "/%s", fsl
->nl_path
.np_components
[i
]);
320 * NFS client connect socket upcall.
321 * (Used only during socket connect/search.)
324 nfs_connect_upcall(socket_t so
, void *arg
, __unused
int waitflag
)
326 struct nfs_socket
*nso
= arg
;
329 int error
= 0, recv
= 1;
331 if (nso
->nso_flags
& NSO_CONNECTING
) {
332 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting\n", nso
);
333 wakeup(nso
->nso_wake
);
337 lck_mtx_lock(&nso
->nso_lock
);
338 if ((nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) || !(nso
->nso_flags
& NSO_PINGING
)) {
339 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso
);
340 lck_mtx_unlock(&nso
->nso_lock
);
343 NFS_SOCK_DBG("nfs connect - socket %p upcall\n", nso
);
344 nso
->nso_flags
|= NSO_UPCALL
;
346 /* loop while we make error-free progress */
347 while (!error
&& recv
) {
348 /* make sure we're still interested in this socket */
349 if (nso
->nso_flags
& (NSO_DISCONNECTING
| NSO_DEAD
)) {
352 lck_mtx_unlock(&nso
->nso_lock
);
354 if (nso
->nso_sotype
== SOCK_STREAM
) {
355 error
= nfs_rpc_record_read(so
, &nso
->nso_rrs
, MSG_DONTWAIT
, &recv
, &m
);
358 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
361 lck_mtx_lock(&nso
->nso_lock
);
363 /* match response with request */
364 struct nfsm_chain nmrep
;
365 uint32_t reply
= 0, rxid
= 0, verf_type
, verf_len
;
366 uint32_t reply_status
, rejected_status
, accepted_status
;
368 nfsm_chain_dissect_init(error
, &nmrep
, m
);
369 nfsm_chain_get_32(error
, &nmrep
, rxid
);
370 nfsm_chain_get_32(error
, &nmrep
, reply
);
371 if (!error
&& ((reply
!= RPC_REPLY
) || (rxid
!= nso
->nso_pingxid
))) {
374 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
375 if (!error
&& (reply_status
== RPC_MSGDENIED
)) {
376 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
378 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
381 nfsm_chain_get_32(error
, &nmrep
, verf_type
); /* verifier flavor */
382 nfsm_chain_get_32(error
, &nmrep
, verf_len
); /* verifier length */
385 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
387 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
389 if ((accepted_status
== RPC_PROGMISMATCH
) && !nso
->nso_version
) {
390 uint32_t minvers
, maxvers
;
391 nfsm_chain_get_32(error
, &nmrep
, minvers
);
392 nfsm_chain_get_32(error
, &nmrep
, maxvers
);
394 if (nso
->nso_protocol
== PMAPPROG
) {
395 if ((minvers
> RPCBVERS4
) || (maxvers
< PMAPVERS
)) {
396 error
= EPROGMISMATCH
;
397 } else if ((nso
->nso_saddr
->sa_family
== AF_INET
) &&
398 (PMAPVERS
>= minvers
) && (PMAPVERS
<= maxvers
)) {
399 nso
->nso_version
= PMAPVERS
;
400 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
401 if ((RPCBVERS4
>= minvers
) && (RPCBVERS4
<= maxvers
)) {
402 nso
->nso_version
= RPCBVERS4
;
403 } else if ((RPCBVERS3
>= minvers
) && (RPCBVERS3
<= maxvers
)) {
404 nso
->nso_version
= RPCBVERS3
;
407 } else if (nso
->nso_protocol
== NFS_PROG
) {
411 * N.B. Both portmapper and rpcbind V3 are happy to return
412 * addresses for other versions than the one you ask (getport or
413 * getaddr) and thus we may have fallen to this code path. So if
414 * we get a version that we support, use highest supported
415 * version. This assumes that the server supports all versions
416 * between minvers and maxvers. Note for IPv6 we will try and
417 * use rpcbind V4 which has getversaddr and we should not get
418 * here if that was successful.
420 for (vers
= nso
->nso_nfs_max_vers
; vers
>= (int)nso
->nso_nfs_min_vers
; vers
--) {
421 if (vers
>= (int)minvers
&& vers
<= (int)maxvers
) {
425 nso
->nso_version
= (vers
< (int)nso
->nso_nfs_min_vers
) ? 0 : vers
;
427 if (!error
&& nso
->nso_version
) {
428 accepted_status
= RPC_SUCCESS
;
432 switch (accepted_status
) {
436 case RPC_PROGUNAVAIL
:
437 error
= EPROGUNAVAIL
;
439 case RPC_PROGMISMATCH
:
440 error
= EPROGMISMATCH
;
442 case RPC_PROCUNAVAIL
:
443 error
= EPROCUNAVAIL
;
455 nso
->nso_flags
&= ~NSO_PINGING
;
457 nso
->nso_error
= error
;
458 nso
->nso_flags
|= NSO_DEAD
;
460 nso
->nso_flags
|= NSO_VERIFIED
;
463 /* wake up search thread */
464 wakeup(nso
->nso_wake
);
469 nso
->nso_flags
&= ~NSO_UPCALL
;
470 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
471 /* problems with the socket... */
472 nso
->nso_error
= error
? error
: EPIPE
;
473 nso
->nso_flags
|= NSO_DEAD
;
474 wakeup(nso
->nso_wake
);
476 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
477 wakeup(&nso
->nso_flags
);
479 lck_mtx_unlock(&nso
->nso_lock
);
483 * Create/initialize an nfs_socket structure.
487 struct nfsmount
*nmp
,
494 struct nfs_socket
**nsop
)
496 struct nfs_socket
*nso
;
499 #ifdef NFS_SOCKET_DEBUGGING
500 char naddr
[MAX_IPv6_STR_LEN
];
503 if (sa
->sa_family
== AF_INET
) {
504 sinaddr
= &((struct sockaddr_in
*)sa
)->sin_addr
;
506 sinaddr
= &((struct sockaddr_in6
*)sa
)->sin6_addr
;
508 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
) {
509 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
512 char naddr
[1] = { 0 };
517 /* Create the socket. */
518 MALLOC(nso
, struct nfs_socket
*, sizeof(struct nfs_socket
), M_TEMP
, M_WAITOK
| M_ZERO
);
520 MALLOC(nso
->nso_saddr
, struct sockaddr
*, sa
->sa_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
522 if (!nso
|| !nso
->nso_saddr
) {
528 lck_mtx_init(&nso
->nso_lock
, nfs_request_grp
, LCK_ATTR_NULL
);
529 nso
->nso_sotype
= sotype
;
530 if (nso
->nso_sotype
== SOCK_STREAM
) {
531 nfs_rpc_record_state_init(&nso
->nso_rrs
);
534 nso
->nso_timestamp
= now
.tv_sec
;
535 bcopy(sa
, nso
->nso_saddr
, sa
->sa_len
);
536 if (sa
->sa_family
== AF_INET
) {
537 ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
538 } else if (sa
->sa_family
== AF_INET6
) {
539 ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
541 nso
->nso_protocol
= protocol
;
542 nso
->nso_version
= vers
;
543 nso
->nso_nfs_min_vers
= PVER2MAJOR(nmp
->nm_min_vers
);
544 nso
->nso_nfs_max_vers
= PVER2MAJOR(nmp
->nm_max_vers
);
546 error
= sock_socket(sa
->sa_family
, nso
->nso_sotype
, 0, NULL
, NULL
, &nso
->nso_so
);
548 /* Some servers require that the client port be a reserved port number. */
549 if (!error
&& resvport
&& ((sa
->sa_family
== AF_INET
) || (sa
->sa_family
== AF_INET6
))) {
550 struct sockaddr_storage ss
;
551 int level
= (sa
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
552 int optname
= (sa
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
553 int portrange
= IP_PORTRANGE_LOW
;
555 error
= sock_setsockopt(nso
->nso_so
, level
, optname
, &portrange
, sizeof(portrange
));
556 if (!error
) { /* bind now to check for failure */
557 ss
.ss_len
= sa
->sa_len
;
558 ss
.ss_family
= sa
->sa_family
;
559 if (ss
.ss_family
== AF_INET
) {
560 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
561 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
562 } else if (ss
.ss_family
== AF_INET6
) {
563 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
564 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
569 error
= sock_bind(nso
->nso_so
, (struct sockaddr
*)&ss
);
575 NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
576 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nso
, naddr
, sotype
,
577 resvport
? "r" : "", port
, protocol
, vers
);
578 nfs_socket_destroy(nso
);
580 NFS_SOCK_DBG("nfs connect %s created socket %p %s type %d%s port %d prot %d %d\n",
581 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, naddr
,
582 sotype
, resvport
? "r" : "", port
, protocol
, vers
);
589 * Destroy an nfs_socket structure.
592 nfs_socket_destroy(struct nfs_socket
*nso
)
594 struct timespec ts
= { 4, 0 };
596 lck_mtx_lock(&nso
->nso_lock
);
597 nso
->nso_flags
|= NSO_DISCONNECTING
;
598 if (nso
->nso_flags
& NSO_UPCALL
) { /* give upcall a chance to complete */
599 msleep(&nso
->nso_flags
, &nso
->nso_lock
, PZERO
- 1, "nfswaitupcall", &ts
);
601 lck_mtx_unlock(&nso
->nso_lock
);
602 sock_shutdown(nso
->nso_so
, SHUT_RDWR
);
603 sock_close(nso
->nso_so
);
604 if (nso
->nso_sotype
== SOCK_STREAM
) {
605 nfs_rpc_record_state_cleanup(&nso
->nso_rrs
);
607 lck_mtx_destroy(&nso
->nso_lock
, nfs_request_grp
);
608 if (nso
->nso_saddr
) {
609 FREE(nso
->nso_saddr
, M_SONAME
);
611 if (nso
->nso_saddr2
) {
612 FREE(nso
->nso_saddr2
, M_SONAME
);
614 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso
);
619 * Set common socket options on an nfs_socket.
622 nfs_socket_options(struct nfsmount
*nmp
, struct nfs_socket
*nso
)
625 * Set socket send/receive timeouts
626 * - Receive timeout shouldn't matter because most receives are performed
627 * in the socket upcall non-blocking.
628 * - Send timeout should allow us to react to a blocked socket.
629 * Soft mounts will want to abort sooner.
631 struct timeval timeo
;
635 timeo
.tv_sec
= (NMFLAG(nmp
, SOFT
) || nfs_can_squish(nmp
)) ? 5 : 60;
636 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
637 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
638 if (nso
->nso_sotype
== SOCK_STREAM
) {
639 /* Assume that SOCK_STREAM always requires a connection */
640 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_KEEPALIVE
, &on
, sizeof(on
));
641 /* set nodelay for TCP */
642 sock_gettype(nso
->nso_so
, NULL
, NULL
, &proto
);
643 if (proto
== IPPROTO_TCP
) {
644 sock_setsockopt(nso
->nso_so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
647 if (nso
->nso_sotype
== SOCK_DGRAM
) { /* set socket buffer sizes for UDP */
648 int reserve
= NFS_UDPSOCKBUF
;
649 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDBUF
, &reserve
, sizeof(reserve
));
650 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVBUF
, &reserve
, sizeof(reserve
));
652 /* set SO_NOADDRERR to detect network changes ASAP */
653 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
654 /* just playin' it safe with upcalls */
655 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
656 /* socket should be interruptible if the mount is */
657 if (!NMFLAG(nmp
, INTR
)) {
658 sock_nointerrupt(nso
->nso_so
, 1);
663 * Release resources held in an nfs_socket_search.
666 nfs_socket_search_cleanup(struct nfs_socket_search
*nss
)
668 struct nfs_socket
*nso
, *nsonext
;
670 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
671 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
673 nfs_socket_destroy(nso
);
676 nfs_socket_destroy(nss
->nss_sock
);
677 nss
->nss_sock
= NULL
;
682 * Prefer returning certain errors over others.
683 * This function returns a ranking of the given error.
686 nfs_connect_error_class(int error
)
721 * Make sure a socket search returns the best error.
724 nfs_socket_search_update_error(struct nfs_socket_search
*nss
, int error
)
726 if (nfs_connect_error_class(error
) >= nfs_connect_error_class(nss
->nss_error
)) {
727 nss
->nss_error
= error
;
731 /* nfs_connect_search_new_socket:
732 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
735 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
736 * could not be used or if a socket timed out.
739 nfs_connect_search_new_socket(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
741 struct nfs_fs_location
*fsl
;
742 struct nfs_fs_server
*fss
;
743 struct sockaddr_storage ss
;
744 struct nfs_socket
*nso
;
749 NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
750 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
->nss_addrcnt
);
753 * while there are addresses and:
754 * we have no sockets or
755 * the last address failed and did not produce a socket (nss_last < 0) or
756 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
757 * then attempt to create a socket with the current address.
759 while (nss
->nss_addrcnt
> 0 && ((nss
->nss_last
< 0) || (nss
->nss_sockcnt
== 0) ||
760 ((nss
->nss_sockcnt
< 4) && (now
->tv_sec
>= (nss
->nss_last
+ 2))))) {
761 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
764 /* Can we convert the address to a sockaddr? */
765 fsl
= nmp
->nm_locations
.nl_locations
[nss
->nss_nextloc
.nli_loc
];
766 fss
= fsl
->nl_servers
[nss
->nss_nextloc
.nli_serv
];
767 addrstr
= fss
->ns_addresses
[nss
->nss_nextloc
.nli_addr
];
768 if (!nfs_uaddr2sockaddr(addrstr
, (struct sockaddr
*)&ss
)) {
769 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
770 nss
->nss_addrcnt
-= 1;
774 /* Check that socket family is acceptable. */
775 if (nmp
->nm_sofamily
&& (ss
.ss_family
!= nmp
->nm_sofamily
)) {
776 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
777 nss
->nss_addrcnt
-= 1;
782 /* Create the socket. */
783 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nss
->nss_sotype
,
784 nss
->nss_port
, nss
->nss_protocol
, nss
->nss_version
,
785 ((nss
->nss_protocol
== NFS_PROG
) && NMFLAG(nmp
, RESVPORT
)), &nso
);
790 nso
->nso_location
= nss
->nss_nextloc
;
792 error
= sock_setupcall(nso
->nso_so
, nfs_connect_upcall
, nso
);
794 lck_mtx_lock(&nso
->nso_lock
);
795 nso
->nso_error
= error
;
796 nso
->nso_flags
|= NSO_DEAD
;
797 lck_mtx_unlock(&nso
->nso_lock
);
800 TAILQ_INSERT_TAIL(&nss
->nss_socklist
, nso
, nso_link
);
802 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
803 nss
->nss_addrcnt
-= 1;
805 nss
->nss_last
= now
->tv_sec
;
808 if (nss
->nss_addrcnt
== 0 && nss
->nss_last
< 0) {
809 nss
->nss_last
= now
->tv_sec
;
816 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
817 * If successful set the socket options for the socket as require from the mount.
819 * Assumes: nso->nso_lock is held on entry and return.
822 nfs_connect_search_socket_connect(struct nfsmount
*nmp
, struct nfs_socket
*nso
, int verbose
)
826 if ((nso
->nso_sotype
!= SOCK_STREAM
) && NMFLAG(nmp
, NOCONNECT
)) {
827 /* no connection needed, just say it's already connected */
828 NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
829 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
830 nso
->nso_flags
|= NSO_CONNECTED
;
831 nfs_socket_options(nmp
, nso
);
832 return 1; /* Socket is connected and setup */
833 } else if (!(nso
->nso_flags
& NSO_CONNECTING
)) {
834 /* initiate the connection */
835 nso
->nso_flags
|= NSO_CONNECTING
;
836 lck_mtx_unlock(&nso
->nso_lock
);
837 NFS_SOCK_DBG("nfs connect %s connecting socket %p\n",
838 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
839 error
= sock_connect(nso
->nso_so
, nso
->nso_saddr
, MSG_DONTWAIT
);
840 lck_mtx_lock(&nso
->nso_lock
);
841 if (error
&& (error
!= EINPROGRESS
)) {
842 nso
->nso_error
= error
;
843 nso
->nso_flags
|= NSO_DEAD
;
847 if (nso
->nso_flags
& NSO_CONNECTING
) {
848 /* check the connection */
849 if (sock_isconnected(nso
->nso_so
)) {
850 NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
851 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
852 nso
->nso_flags
&= ~NSO_CONNECTING
;
853 nso
->nso_flags
|= NSO_CONNECTED
;
854 nfs_socket_options(nmp
, nso
);
855 return 1; /* Socket is connected and setup */
857 int optlen
= sizeof(error
);
859 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &error
, &optlen
);
860 if (error
) { /* we got an error on the socket */
861 NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
862 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
864 printf("nfs connect socket error %d for %s\n",
865 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
867 nso
->nso_error
= error
;
868 nso
->nso_flags
|= NSO_DEAD
;
874 return 0; /* Waiting to be connected */
878 * nfs_connect_search_ping: Send a null proc on the nso socket.
881 nfs_connect_search_ping(struct nfsmount
*nmp
, struct nfs_socket
*nso
, struct timeval
*now
)
883 /* initiate a NULL RPC request */
884 uint64_t xid
= nso
->nso_pingxid
;
885 mbuf_t m
, mreq
= NULL
;
887 size_t reqlen
, sentlen
;
888 uint32_t vers
= nso
->nso_version
;
892 if (nso
->nso_protocol
== PMAPPROG
) {
893 vers
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
894 } else if (nso
->nso_protocol
== NFS_PROG
) {
895 vers
= PVER2MAJOR(nmp
->nm_max_vers
);
898 lck_mtx_unlock(&nso
->nso_lock
);
899 error
= nfsm_rpchead2(nmp
, nso
->nso_sotype
, nso
->nso_protocol
, vers
, 0, RPCAUTH_SYS
,
900 vfs_context_ucred(vfs_context_kernel()), NULL
, NULL
, &xid
, &mreq
);
901 lck_mtx_lock(&nso
->nso_lock
);
903 nso
->nso_flags
|= NSO_PINGING
;
904 nso
->nso_pingxid
= R_XID32(xid
);
905 nso
->nso_reqtimestamp
= now
->tv_sec
;
906 bzero(&msg
, sizeof(msg
));
907 if ((nso
->nso_sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
)) {
908 msg
.msg_name
= nso
->nso_saddr
;
909 msg
.msg_namelen
= nso
->nso_saddr
->sa_len
;
911 for (reqlen
= 0, m
= mreq
; m
; m
= mbuf_next(m
)) {
912 reqlen
+= mbuf_len(m
);
914 lck_mtx_unlock(&nso
->nso_lock
);
915 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreq
, 0, &sentlen
);
916 NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
917 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
918 lck_mtx_lock(&nso
->nso_lock
);
919 if (!error
&& (sentlen
!= reqlen
)) {
924 nso
->nso_error
= error
;
925 nso
->nso_flags
|= NSO_DEAD
;
933 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
934 * Set the nfs socket protocol and version if needed.
937 nfs_connect_search_socket_found(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct nfs_socket
*nso
)
939 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
940 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
941 if (!nso
->nso_version
) {
942 /* If the version isn't set, the default must have worked. */
943 if (nso
->nso_protocol
== PMAPPROG
) {
944 nso
->nso_version
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
946 if (nso
->nso_protocol
== NFS_PROG
) {
947 nso
->nso_version
= PVER2MAJOR(nmp
->nm_max_vers
);
950 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
956 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
957 * the list. Dead socket are then destroyed.
960 nfs_connect_search_socket_reap(struct nfsmount
*nmp __unused
, struct nfs_socket_search
*nss
, struct timeval
*now
)
962 struct nfs_socket
*nso
, *nsonext
;
964 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
965 lck_mtx_lock(&nso
->nso_lock
);
966 if (now
->tv_sec
>= (nso
->nso_timestamp
+ nss
->nss_timeo
)) {
968 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
969 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
970 nso
->nso_error
= ETIMEDOUT
;
971 nso
->nso_flags
|= NSO_DEAD
;
973 if (!(nso
->nso_flags
& NSO_DEAD
)) {
974 lck_mtx_unlock(&nso
->nso_lock
);
977 lck_mtx_unlock(&nso
->nso_lock
);
978 NFS_SOCK_DBG("nfs connect %s reaping socket %p %d\n",
979 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, nso
->nso_error
);
980 nfs_socket_search_update_error(nss
, nso
->nso_error
);
981 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
983 nfs_socket_destroy(nso
);
984 /* If there are more sockets to try, force the starting of another socket */
985 if (nss
->nss_addrcnt
> 0) {
992 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
995 nfs_connect_search_check(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
999 /* log a warning if connect is taking a while */
1000 if (((now
->tv_sec
- nss
->nss_timestamp
) >= 8) && ((nss
->nss_flags
& (NSS_VERBOSE
| NSS_WARNED
)) == NSS_VERBOSE
)) {
1001 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1002 nss
->nss_flags
|= NSS_WARNED
;
1004 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
1007 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 0))) {
1011 /* If we were succesfull at sending a ping, wait up to a second for a reply */
1012 if (nss
->nss_last
>= 0) {
1013 tsleep(nss
, PSOCK
, "nfs_connect_search_wait", hz
);
1021 * Continue the socket search until we have something to report.
1024 nfs_connect_search_loop(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
)
1026 struct nfs_socket
*nso
;
1029 int verbose
= (nss
->nss_flags
& NSS_VERBOSE
);
1033 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, now
.tv_sec
);
1035 /* add a new socket to the socket list if needed and available */
1036 error
= nfs_connect_search_new_socket(nmp
, nss
, &now
);
1038 NFS_SOCK_DBG("nfs connect returned %d\n", error
);
1042 /* check each active socket on the list and try to push it along */
1043 TAILQ_FOREACH(nso
, &nss
->nss_socklist
, nso_link
) {
1044 lck_mtx_lock(&nso
->nso_lock
);
1046 /* If not connected connect it */
1047 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
1048 if (!nfs_connect_search_socket_connect(nmp
, nso
, verbose
)) {
1049 lck_mtx_unlock(&nso
->nso_lock
);
1054 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
1055 if (!(nso
->nso_flags
& (NSO_PINGING
| NSO_VERIFIED
)) ||
1056 ((nso
->nso_sotype
== SOCK_DGRAM
) && (now
.tv_sec
>= nso
->nso_reqtimestamp
+ 2))) {
1057 if (!nfs_connect_search_ping(nmp
, nso
, &now
)) {
1058 lck_mtx_unlock(&nso
->nso_lock
);
1063 /* Has the socket been verified by the up call routine? */
1064 if (nso
->nso_flags
& NSO_VERIFIED
) {
1065 /* WOOHOO!! This socket looks good! */
1066 nfs_connect_search_socket_found(nmp
, nss
, nso
);
1067 lck_mtx_unlock(&nso
->nso_lock
);
1070 lck_mtx_unlock(&nso
->nso_lock
);
1073 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1074 nfs_connect_search_socket_reap(nmp
, nss
, &now
);
1077 * Keep looping if we haven't found a socket yet and we have more
1078 * sockets to (continue to) try.
1081 if (!nss
->nss_sock
&& (!TAILQ_EMPTY(&nss
->nss_socklist
) || nss
->nss_addrcnt
)) {
1082 error
= nfs_connect_search_check(nmp
, nss
, &now
);
1088 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1093 * Initialize a new NFS connection.
1095 * Search for a location to connect a socket to and initialize the connection.
1097 * An NFS mount may have multiple locations/servers/addresses available.
1098 * We attempt to connect to each one asynchronously and will start
1099 * several sockets in parallel if other locations are slow to answer.
1100 * We'll use the first NFS socket we can successfully set up.
1102 * The search may involve contacting the portmapper service first.
1104 * A mount's initial connection may require negotiating some parameters such
1105 * as socket type and NFS version.
1109 nfs_connect(struct nfsmount
*nmp
, int verbose
, int timeo
)
1111 struct nfs_socket_search nss
;
1112 struct nfs_socket
*nso
, *nsonfs
;
1113 struct sockaddr_storage ss
;
1114 struct sockaddr
*saddr
, *oldsaddr
;
1116 struct timeval now
, start
;
1117 int error
, savederror
, nfsvers
;
1119 uint8_t sotype
= nmp
->nm_sotype
? nmp
->nm_sotype
: SOCK_STREAM
;
1120 fhandle_t
*fh
= NULL
;
1125 /* paranoia... check that we have at least one address in the locations */
1127 for (loc
= 0; loc
< nmp
->nm_locations
.nl_numlocs
; loc
++) {
1128 for (serv
= 0; serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
; serv
++) {
1129 addrtotal
+= nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
;
1130 if (nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
== 0) {
1131 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1132 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1133 nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_name
);
1138 if (addrtotal
== 0) {
1139 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1140 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1143 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1144 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, addrtotal
);
1147 lck_mtx_lock(&nmp
->nm_lock
);
1148 nmp
->nm_sockflags
|= NMSOCK_CONNECTING
;
1150 lck_mtx_unlock(&nmp
->nm_lock
);
1151 microuptime(&start
);
1152 savederror
= error
= 0;
1155 /* initialize socket search state */
1156 bzero(&nss
, sizeof(nss
));
1157 nss
.nss_addrcnt
= addrtotal
;
1158 nss
.nss_error
= savederror
;
1159 TAILQ_INIT(&nss
.nss_socklist
);
1160 nss
.nss_sotype
= sotype
;
1161 nss
.nss_startloc
= nmp
->nm_locations
.nl_current
;
1162 nss
.nss_timestamp
= start
.tv_sec
;
1163 nss
.nss_timeo
= timeo
;
1165 nss
.nss_flags
|= NSS_VERBOSE
;
1168 /* First time connecting, we may need to negotiate some things */
1169 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1170 if (!nmp
->nm_vers
) {
1171 /* No NFS version specified... */
1172 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1173 if (PVER2MAJOR(nmp
->nm_max_vers
) >= NFS_VER4
&& tryv4
) {
1174 nss
.nss_port
= NFS_PORT
;
1175 nss
.nss_protocol
= NFS_PROG
;
1176 nss
.nss_version
= 4;
1177 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1179 /* ...connect to portmapper first if we (may) need any ports. */
1180 nss
.nss_port
= PMAPPORT
;
1181 nss
.nss_protocol
= PMAPPROG
;
1182 nss
.nss_version
= 0;
1185 /* ...connect to NFS port first. */
1186 nss
.nss_port
= nmp
->nm_nfsport
;
1187 nss
.nss_protocol
= NFS_PROG
;
1188 nss
.nss_version
= 0;
1190 } else if (nmp
->nm_vers
>= NFS_VER4
) {
1192 /* For NFSv4, we use the given (or default) port. */
1193 nss
.nss_port
= nmp
->nm_nfsport
? nmp
->nm_nfsport
: NFS_PORT
;
1194 nss
.nss_protocol
= NFS_PROG
;
1195 nss
.nss_version
= 4;
1197 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1198 * if no port is specified on the mount;
1199 * Note nm_vers is set so we will only try NFS_VER4.
1201 if (!nmp
->nm_nfsport
) {
1202 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1205 nss
.nss_port
= PMAPPORT
;
1206 nss
.nss_protocol
= PMAPPROG
;
1207 nss
.nss_version
= 0;
1210 /* For NFSv3/v2... */
1211 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1212 /* ...connect to portmapper first if we need any ports. */
1213 nss
.nss_port
= PMAPPORT
;
1214 nss
.nss_protocol
= PMAPPROG
;
1215 nss
.nss_version
= 0;
1217 /* ...connect to NFS port first. */
1218 nss
.nss_port
= nmp
->nm_nfsport
;
1219 nss
.nss_protocol
= NFS_PROG
;
1220 nss
.nss_version
= nmp
->nm_vers
;
1223 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1224 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1225 nss
.nss_protocol
, nss
.nss_version
);
1227 /* we've connected before, just connect to NFS port */
1228 if (!nmp
->nm_nfsport
) {
1229 /* need to ask portmapper which port that would be */
1230 nss
.nss_port
= PMAPPORT
;
1231 nss
.nss_protocol
= PMAPPROG
;
1232 nss
.nss_version
= 0;
1234 nss
.nss_port
= nmp
->nm_nfsport
;
1235 nss
.nss_protocol
= NFS_PROG
;
1236 nss
.nss_version
= nmp
->nm_vers
;
1238 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1239 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1240 nss
.nss_protocol
, nss
.nss_version
);
1243 /* Set next location to first valid location. */
1244 /* If start location is invalid, find next location. */
1245 nss
.nss_nextloc
= nss
.nss_startloc
;
1246 if ((nss
.nss_nextloc
.nli_serv
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servcount
) ||
1247 (nss
.nss_nextloc
.nli_addr
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servers
[nss
.nss_nextloc
.nli_serv
]->ns_addrcount
)) {
1248 nfs_location_next(&nmp
->nm_locations
, &nss
.nss_nextloc
);
1249 if (!nfs_location_index_cmp(&nss
.nss_nextloc
, &nss
.nss_startloc
)) {
1250 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1251 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1259 error
= nfs_connect_search_loop(nmp
, &nss
);
1260 if (error
|| !nss
.nss_sock
) {
1262 nfs_socket_search_cleanup(&nss
);
1263 if (nss
.nss_flags
& NSS_FALLBACK2PMAP
) {
1265 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1266 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1270 if (!error
&& (nss
.nss_sotype
== SOCK_STREAM
) && !nmp
->nm_sotype
&& (nmp
->nm_vers
< NFS_VER4
)) {
1272 sotype
= SOCK_DGRAM
;
1273 savederror
= nss
.nss_error
;
1274 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1275 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1279 error
= nss
.nss_error
? nss
.nss_error
: ETIMEDOUT
;
1281 lck_mtx_lock(&nmp
->nm_lock
);
1282 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1284 lck_mtx_unlock(&nmp
->nm_lock
);
1285 if (nss
.nss_flags
& NSS_WARNED
) {
1286 log(LOG_INFO
, "nfs_connect: socket connect aborted for %s\n",
1287 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1293 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1295 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1296 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1300 /* try to use nss_sock */
1302 nss
.nss_sock
= NULL
;
1304 /* We may be speaking to portmap first... to determine port(s). */
1305 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1306 port
= ntohs(((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
);
1308 port
= ntohs(((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
);
1310 if (port
== PMAPPORT
) {
1311 /* Use this portmapper port to get the port #s we need. */
1312 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1313 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1315 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1316 sock_setupcall(nso
->nso_so
, NULL
, NULL
);
1318 /* Set up socket address and port for NFS socket. */
1319 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1321 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1322 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: PVER2MAJOR(nmp
->nm_max_vers
);
1323 if (!(port
= nmp
->nm_nfsport
)) {
1324 if (ss
.ss_family
== AF_INET
) {
1325 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
1326 } else if (ss
.ss_family
== AF_INET6
) {
1327 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
1329 for (; nfsvers
>= (int)PVER2MAJOR(nmp
->nm_min_vers
); nfsvers
--) {
1330 if (nmp
->nm_vers
&& nmp
->nm_vers
!= nfsvers
) {
1331 continue; /* Wrong version */
1333 if (nfsvers
== NFS_VER4
&& nso
->nso_sotype
== SOCK_DGRAM
) {
1334 continue; /* NFSv4 does not do UDP */
1336 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1337 nso
->nso_so
, NFS_PROG
, nfsvers
,
1338 (nso
->nso_sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
, timeo
);
1340 if (ss
.ss_family
== AF_INET
) {
1341 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1342 } else if (ss
.ss_family
== AF_INET6
) {
1343 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1346 error
= EPROGUNAVAIL
;
1348 if (port
== NFS_PORT
&& nfsvers
== NFS_VER4
&& tryv4
== 0) {
1349 continue; /* We already tried this */
1356 if (nfsvers
< (int)PVER2MAJOR(nmp
->nm_min_vers
) && error
== 0) {
1357 error
= EPROGUNAVAIL
;
1360 nfs_socket_search_update_error(&nss
, error
);
1361 nfs_socket_destroy(nso
);
1365 /* Create NFS protocol socket and add it to the list of sockets. */
1366 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1367 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nso
->nso_sotype
, port
,
1368 NFS_PROG
, nfsvers
, NMFLAG(nmp
, RESVPORT
), &nsonfs
);
1370 nfs_socket_search_update_error(&nss
, error
);
1371 nfs_socket_destroy(nso
);
1374 nsonfs
->nso_location
= nso
->nso_location
;
1375 nsonfs
->nso_wake
= &nss
;
1376 error
= sock_setupcall(nsonfs
->nso_so
, nfs_connect_upcall
, nsonfs
);
1378 nfs_socket_search_update_error(&nss
, error
);
1379 nfs_socket_destroy(nsonfs
);
1380 nfs_socket_destroy(nso
);
1383 TAILQ_INSERT_TAIL(&nss
.nss_socklist
, nsonfs
, nso_link
);
1385 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1386 /* Set up socket address and port for MOUNT socket. */
1388 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1389 port
= nmp
->nm_mountport
;
1390 if (ss
.ss_family
== AF_INET
) {
1391 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1392 } else if (ss
.ss_family
== AF_INET6
) {
1393 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1396 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1397 /* If NFS version is unknown, optimistically choose for NFSv3. */
1398 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1399 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1400 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1401 nso
->nso_so
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1404 if (ss
.ss_family
== AF_INET
) {
1405 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1406 } else if (ss
.ss_family
== AF_INET6
) {
1407 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1410 error
= EPROGUNAVAIL
;
1413 /* create sockaddr for MOUNT */
1415 MALLOC(nsonfs
->nso_saddr2
, struct sockaddr
*, ss
.ss_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
1417 if (!error
&& !nsonfs
->nso_saddr2
) {
1421 bcopy(&ss
, nsonfs
->nso_saddr2
, ss
.ss_len
);
1424 lck_mtx_lock(&nsonfs
->nso_lock
);
1425 nsonfs
->nso_error
= error
;
1426 nsonfs
->nso_flags
|= NSO_DEAD
;
1427 lck_mtx_unlock(&nsonfs
->nso_lock
);
1430 nfs_socket_destroy(nso
);
1434 /* nso is an NFS socket */
1435 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1437 /* If NFS version wasn't specified, it was determined during the connect. */
1438 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: (int)nso
->nso_version
;
1440 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1441 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1443 saddr
= nso
->nso_saddr2
;
1445 /* Need sockaddr for MOUNT port */
1446 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1447 port
= nmp
->nm_mountport
;
1448 if (ss
.ss_family
== AF_INET
) {
1449 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1450 } else if (ss
.ss_family
== AF_INET6
) {
1451 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1454 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1455 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1456 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1457 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1458 NULL
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1459 if (ss
.ss_family
== AF_INET
) {
1460 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1461 } else if (ss
.ss_family
== AF_INET6
) {
1462 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1467 saddr
= (struct sockaddr
*)&ss
;
1469 error
= EPROGUNAVAIL
;
1474 MALLOC(fh
, fhandle_t
*, sizeof(fhandle_t
), M_TEMP
, M_WAITOK
| M_ZERO
);
1477 MALLOC_ZONE(path
, char *, MAXPATHLEN
, M_NAMEI
, M_WAITOK
);
1479 if (!saddr
|| !fh
|| !path
) {
1487 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1491 nfs_socket_search_update_error(&nss
, error
);
1492 nfs_socket_destroy(nso
);
1495 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
, path
, MAXPATHLEN
, 1);
1496 error
= nfs3_mount_rpc(nmp
, saddr
, nso
->nso_sotype
, nfsvers
,
1497 path
, vfs_context_current(), timeo
, fh
, &nmp
->nm_servsec
);
1498 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1499 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1501 /* Make sure we can agree on a security flavor. */
1502 int o
, s
; /* indices into mount option and server security flavor lists */
1505 if ((nfsvers
== NFS_VER3
) && !nmp
->nm_servsec
.count
) {
1506 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1507 nmp
->nm_servsec
.count
= 1;
1508 nmp
->nm_servsec
.flavors
[0] = RPCAUTH_SYS
;
1510 if (nmp
->nm_sec
.count
) {
1511 /* Choose the first flavor in our list that the server supports. */
1512 if (!nmp
->nm_servsec
.count
) {
1513 /* we don't know what the server supports, just use our first choice */
1514 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[0];
1517 for (o
= 0; !found
&& (o
< nmp
->nm_sec
.count
); o
++) {
1518 for (s
= 0; !found
&& (s
< nmp
->nm_servsec
.count
); s
++) {
1519 if (nmp
->nm_sec
.flavors
[o
] == nmp
->nm_servsec
.flavors
[s
]) {
1520 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[o
];
1526 /* Choose the first one we support from the server's list. */
1527 if (!nmp
->nm_servsec
.count
) {
1528 nmp
->nm_auth
= RPCAUTH_SYS
;
1531 for (s
= 0; s
< nmp
->nm_servsec
.count
; s
++) {
1532 switch (nmp
->nm_servsec
.flavors
[s
]) {
1534 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1535 if (found
&& (nmp
->nm_auth
== RPCAUTH_NONE
)) {
1543 nmp
->nm_auth
= nmp
->nm_servsec
.flavors
[s
];
1550 error
= !found
? EAUTH
: 0;
1552 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1555 nfs_socket_search_update_error(&nss
, error
);
1558 nfs_socket_destroy(nso
);
1562 FREE(nmp
->nm_fh
, M_TEMP
);
1566 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_CALLUMNT
);
1569 /* put the real upcall in place */
1570 upcall
= (nso
->nso_sotype
== SOCK_STREAM
) ? nfs_tcp_rcv
: nfs_udp_rcv
;
1571 error
= sock_setupcall(nso
->nso_so
, upcall
, nmp
);
1573 nfs_socket_search_update_error(&nss
, error
);
1574 nfs_socket_destroy(nso
);
1578 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1579 /* set mntfromname to this location */
1580 if (!NM_OMATTR_GIVEN(nmp
, MNTFROM
)) {
1581 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
,
1582 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1583 sizeof(vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
), 0);
1585 /* some negotiated values need to remain unchanged for the life of the mount */
1586 if (!nmp
->nm_sotype
) {
1587 nmp
->nm_sotype
= nso
->nso_sotype
;
1589 if (!nmp
->nm_vers
) {
1590 nmp
->nm_vers
= nfsvers
;
1591 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1592 if ((nfsvers
>= NFS_VER4
) && !NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1593 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1594 port
= ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
1595 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
1596 port
= ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
1600 if (port
== NFS_PORT
) {
1601 nmp
->nm_nfsport
= NFS_PORT
;
1605 /* do some version-specific pre-mount set up */
1606 if (nmp
->nm_vers
>= NFS_VER4
) {
1608 nmp
->nm_mounttime
= ((uint64_t)now
.tv_sec
<< 32) | now
.tv_usec
;
1609 if (!NMFLAG(nmp
, NOCALLBACK
)) {
1610 nfs4_mount_callback_setup(nmp
);
1615 /* Initialize NFS socket state variables */
1616 lck_mtx_lock(&nmp
->nm_lock
);
1617 nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] =
1618 nmp
->nm_srtt
[3] = (NFS_TIMEO
<< 3);
1619 nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
1620 nmp
->nm_sdrtt
[3] = 0;
1621 if (nso
->nso_sotype
== SOCK_DGRAM
) {
1622 nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
1624 } else if (nso
->nso_sotype
== SOCK_STREAM
) {
1625 nmp
->nm_timeouts
= 0;
1627 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1628 nmp
->nm_sockflags
|= NMSOCK_SETUP
;
1629 /* move the socket to the mount structure */
1631 oldsaddr
= nmp
->nm_saddr
;
1632 nmp
->nm_saddr
= nso
->nso_saddr
;
1633 lck_mtx_unlock(&nmp
->nm_lock
);
1634 error
= nfs_connect_setup(nmp
);
1635 lck_mtx_lock(&nmp
->nm_lock
);
1636 nmp
->nm_sockflags
&= ~NMSOCK_SETUP
;
1638 nmp
->nm_sockflags
|= NMSOCK_READY
;
1639 wakeup(&nmp
->nm_sockflags
);
1642 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1643 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1644 nfs_socket_search_update_error(&nss
, error
);
1645 nmp
->nm_saddr
= oldsaddr
;
1646 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1647 /* undo settings made prior to setup */
1648 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_SOCKET_TYPE
)) {
1651 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_VERSION
)) {
1652 if (nmp
->nm_vers
>= NFS_VER4
) {
1653 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1654 nmp
->nm_nfsport
= 0;
1657 nfs4_mount_callback_shutdown(nmp
);
1659 if (IS_VALID_CRED(nmp
->nm_mcred
)) {
1660 kauth_cred_unref(&nmp
->nm_mcred
);
1662 bzero(&nmp
->nm_un
, sizeof(nmp
->nm_un
));
1667 lck_mtx_unlock(&nmp
->nm_lock
);
1669 nfs_socket_destroy(nso
);
1673 /* update current location */
1674 if ((nmp
->nm_locations
.nl_current
.nli_flags
& NLI_VALID
) &&
1675 (nmp
->nm_locations
.nl_current
.nli_serv
!= nso
->nso_location
.nli_serv
)) {
1676 /* server has changed, we should initiate failover/recovery */
1679 nmp
->nm_locations
.nl_current
= nso
->nso_location
;
1680 nmp
->nm_locations
.nl_current
.nli_flags
|= NLI_VALID
;
1682 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1683 /* We have now successfully connected... make a note of it. */
1684 nmp
->nm_sockflags
|= NMSOCK_HASCONNECTED
;
1687 lck_mtx_unlock(&nmp
->nm_lock
);
1689 FREE(oldsaddr
, M_SONAME
);
1692 if (nss
.nss_flags
& NSS_WARNED
) {
1693 log(LOG_INFO
, "nfs_connect: socket connect completed for %s\n",
1694 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1698 nfs_socket_search_cleanup(&nss
);
1703 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1705 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1710 /* setup & confirm socket connection is functional */
1712 nfs_connect_setup(struct nfsmount
*nmp
)
1716 if (nmp
->nm_vers
>= NFS_VER4
) {
1717 if (nmp
->nm_state
& NFSSTA_CLIENTID
) {
1718 /* first, try to renew our current state */
1719 error
= nfs4_renew(nmp
, R_SETUP
);
1720 if ((error
== NFSERR_ADMIN_REVOKED
) ||
1721 (error
== NFSERR_CB_PATH_DOWN
) ||
1722 (error
== NFSERR_EXPIRED
) ||
1723 (error
== NFSERR_LEASE_MOVED
) ||
1724 (error
== NFSERR_STALE_CLIENTID
)) {
1725 lck_mtx_lock(&nmp
->nm_lock
);
1726 nfs_need_recover(nmp
, error
);
1727 lck_mtx_unlock(&nmp
->nm_lock
);
1730 error
= nfs4_setclientid(nmp
);
1736 * NFS socket reconnect routine:
1737 * Called when a connection is broken.
1738 * - disconnect the old socket
1739 * - nfs_connect() again
1740 * - set R_MUSTRESEND for all outstanding requests on mount point
1741 * If this fails the mount point is DEAD!
1744 nfs_reconnect(struct nfsmount
*nmp
)
1748 thread_t thd
= current_thread();
1749 int error
, wentdown
= 0, verbose
= 1;
1754 lastmsg
= now
.tv_sec
- (nmp
->nm_tprintf_delay
- nmp
->nm_tprintf_initial_delay
);
1756 nfs_disconnect(nmp
);
1759 lck_mtx_lock(&nmp
->nm_lock
);
1760 timeo
= nfs_is_squishy(nmp
) ? 8 : 30;
1761 lck_mtx_unlock(&nmp
->nm_lock
);
1763 while ((error
= nfs_connect(nmp
, verbose
, timeo
))) {
1765 nfs_disconnect(nmp
);
1766 if ((error
== EINTR
) || (error
== ERESTART
)) {
1773 if ((lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
) {
1774 lastmsg
= now
.tv_sec
;
1775 nfs_down(nmp
, thd
, error
, NFSSTA_TIMEO
, "can not connect", 0);
1778 lck_mtx_lock(&nmp
->nm_lock
);
1779 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1780 /* we're not yet completely mounted and */
1781 /* we can't reconnect, so we fail */
1782 lck_mtx_unlock(&nmp
->nm_lock
);
1783 NFS_SOCK_DBG("Not mounted returning %d\n", error
);
1787 if (nfs_mount_check_dead_timeout(nmp
)) {
1788 nfs_mount_make_zombie(nmp
);
1789 lck_mtx_unlock(&nmp
->nm_lock
);
1793 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1794 lck_mtx_unlock(&nmp
->nm_lock
);
1797 lck_mtx_unlock(&nmp
->nm_lock
);
1798 tsleep(nfs_reconnect
, PSOCK
, "nfs_reconnect_delay", 2 * hz
);
1799 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1805 nfs_up(nmp
, thd
, NFSSTA_TIMEO
, "connected");
1809 * Loop through outstanding request list and mark all requests
1810 * as needing a resend. (Though nfs_need_reconnect() probably
1811 * marked them all already.)
1813 lck_mtx_lock(nfs_request_mutex
);
1814 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1815 if (rq
->r_nmp
== nmp
) {
1816 lck_mtx_lock(&rq
->r_mtx
);
1817 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1818 rq
->r_flags
|= R_MUSTRESEND
;
1821 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
1822 nfs_asyncio_resend(rq
);
1825 lck_mtx_unlock(&rq
->r_mtx
);
1828 lck_mtx_unlock(nfs_request_mutex
);
1833 * NFS disconnect. Clean up and unlink.
1836 nfs_disconnect(struct nfsmount
*nmp
)
1838 struct nfs_socket
*nso
;
1840 lck_mtx_lock(&nmp
->nm_lock
);
1843 struct timespec ts
= { 1, 0 };
1844 if (nmp
->nm_state
& NFSSTA_SENDING
) { /* wait for sending to complete */
1845 nmp
->nm_state
|= NFSSTA_WANTSND
;
1846 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitsending", &ts
);
1849 if (nmp
->nm_sockflags
& NMSOCK_POKE
) { /* wait for poking to complete */
1850 msleep(&nmp
->nm_sockflags
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitpoke", &ts
);
1853 nmp
->nm_sockflags
|= NMSOCK_DISCONNECTING
;
1854 nmp
->nm_sockflags
&= ~NMSOCK_READY
;
1857 if (nso
->nso_saddr
== nmp
->nm_saddr
) {
1858 nso
->nso_saddr
= NULL
;
1860 lck_mtx_unlock(&nmp
->nm_lock
);
1861 nfs_socket_destroy(nso
);
1862 lck_mtx_lock(&nmp
->nm_lock
);
1863 nmp
->nm_sockflags
&= ~NMSOCK_DISCONNECTING
;
1864 lck_mtx_unlock(&nmp
->nm_lock
);
1866 lck_mtx_unlock(&nmp
->nm_lock
);
1871 * mark an NFS mount as needing a reconnect/resends.
1874 nfs_need_reconnect(struct nfsmount
*nmp
)
1878 lck_mtx_lock(&nmp
->nm_lock
);
1879 nmp
->nm_sockflags
&= ~(NMSOCK_READY
| NMSOCK_SETUP
);
1880 lck_mtx_unlock(&nmp
->nm_lock
);
1883 * Loop through outstanding request list and
1884 * mark all requests as needing a resend.
1886 lck_mtx_lock(nfs_request_mutex
);
1887 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1888 if (rq
->r_nmp
== nmp
) {
1889 lck_mtx_lock(&rq
->r_mtx
);
1890 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1891 rq
->r_flags
|= R_MUSTRESEND
;
1894 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
1895 nfs_asyncio_resend(rq
);
1898 lck_mtx_unlock(&rq
->r_mtx
);
1901 lck_mtx_unlock(nfs_request_mutex
);
1906 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
1909 nfs_mount_sock_thread(void *arg
, __unused wait_result_t wr
)
1911 struct nfsmount
*nmp
= arg
;
1912 struct timespec ts
= { 30, 0 };
1913 thread_t thd
= current_thread();
1916 int error
, dofinish
;
1918 int do_reconnect_sleep
= 0;
1920 lck_mtx_lock(&nmp
->nm_lock
);
1921 while (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
1922 !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
1923 !LIST_EMPTY(&nmp
->nm_monlist
) ||
1924 nmp
->nm_deadto_start
||
1925 (nmp
->nm_state
& NFSSTA_RECOVER
) ||
1926 ((nmp
->nm_vers
>= NFS_VER4
) && !TAILQ_EMPTY(&nmp
->nm_dreturnq
))) {
1927 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
1930 /* do reconnect, if necessary */
1931 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
1932 if (nmp
->nm_reconnect_start
<= 0) {
1934 nmp
->nm_reconnect_start
= now
.tv_sec
;
1936 lck_mtx_unlock(&nmp
->nm_lock
);
1937 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1939 * XXX We don't want to call reconnect again right away if returned errors
1940 * before that may not have blocked. This has caused spamming null procs
1941 * from machines in the pass.
1943 if (do_reconnect_sleep
) {
1944 tsleep(nfs_mount_sock_thread
, PSOCK
, "nfs_reconnect_sock_thread_delay", hz
);
1946 error
= nfs_reconnect(nmp
);
1949 if (error
== EIO
|| error
== EINTR
) {
1950 lvl
= (do_reconnect_sleep
++ % 600) ? 7 : 0;
1952 nfs_printf(NFS_FAC_SOCK
, lvl
, "nfs reconnect %s: returned %d\n",
1953 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1955 nmp
->nm_reconnect_start
= 0;
1956 do_reconnect_sleep
= 0;
1958 lck_mtx_lock(&nmp
->nm_lock
);
1960 if ((nmp
->nm_sockflags
& NMSOCK_READY
) &&
1961 (nmp
->nm_state
& NFSSTA_RECOVER
) &&
1962 !(nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
1963 !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
1964 /* perform state recovery */
1965 lck_mtx_unlock(&nmp
->nm_lock
);
1967 lck_mtx_lock(&nmp
->nm_lock
);
1969 /* handle NFSv4 delegation returns */
1970 while ((nmp
->nm_vers
>= NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) &&
1971 (nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
) &&
1972 ((np
= TAILQ_FIRST(&nmp
->nm_dreturnq
)))) {
1973 lck_mtx_unlock(&nmp
->nm_lock
);
1974 nfs4_delegation_return(np
, R_RECOVER
, thd
, nmp
->nm_mcred
);
1975 lck_mtx_lock(&nmp
->nm_lock
);
1977 /* do resends, if necessary/possible */
1978 while ((((nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
)) ||
1979 (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) &&
1980 ((req
= TAILQ_FIRST(&nmp
->nm_resendq
)))) {
1981 if (req
->r_resendtime
) {
1984 while (req
&& !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) && req
->r_resendtime
&& (now
.tv_sec
< req
->r_resendtime
)) {
1985 req
= TAILQ_NEXT(req
, r_rchain
);
1990 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
1991 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
1992 lck_mtx_unlock(&nmp
->nm_lock
);
1993 lck_mtx_lock(&req
->r_mtx
);
1994 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
1995 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
1996 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1997 req
->r_flags
&= ~R_RESENDQ
;
1999 lck_mtx_unlock(&req
->r_mtx
);
2001 nfs_asyncio_finish(req
);
2003 nfs_request_rele(req
);
2004 lck_mtx_lock(&nmp
->nm_lock
);
2007 if ((req
->r_flags
& R_RESTART
) || nfs_request_using_gss(req
)) {
2008 req
->r_flags
&= ~R_RESTART
;
2009 req
->r_resendtime
= 0;
2010 lck_mtx_unlock(&req
->r_mtx
);
2011 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2012 nfs_reqdequeue(req
);
2013 if (nfs_request_using_gss(req
)) {
2014 nfs_gss_clnt_rpcdone(req
);
2015 error
= nfs_gss_clnt_args_restore(req
);
2016 if (error
== ENEEDAUTH
) {
2020 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2021 nfs_request_using_gss(req
) ? " gss" : "", req
->r_procnum
, req
->r_xid
,
2022 req
->r_flags
, req
->r_rtt
);
2023 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
2025 error
= nfs_request_add_header(req
);
2028 error
= nfs_request_send(req
, 0);
2030 lck_mtx_lock(&req
->r_mtx
);
2031 if (req
->r_flags
& R_RESENDQ
) {
2032 req
->r_flags
&= ~R_RESENDQ
;
2035 req
->r_error
= error
;
2038 dofinish
= error
&& req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
2039 lck_mtx_unlock(&req
->r_mtx
);
2041 nfs_asyncio_finish(req
);
2043 nfs_request_rele(req
);
2044 lck_mtx_lock(&nmp
->nm_lock
);
2048 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2049 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
2050 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
2052 req
->r_flags
|= R_SENDING
;
2053 lck_mtx_unlock(&req
->r_mtx
);
2054 error
= nfs_send(req
, 0);
2055 lck_mtx_lock(&req
->r_mtx
);
2057 if (req
->r_flags
& R_RESENDQ
) {
2058 req
->r_flags
&= ~R_RESENDQ
;
2061 lck_mtx_unlock(&req
->r_mtx
);
2062 nfs_request_rele(req
);
2063 lck_mtx_lock(&nmp
->nm_lock
);
2067 req
->r_error
= error
;
2068 if (req
->r_flags
& R_RESENDQ
) {
2069 req
->r_flags
&= ~R_RESENDQ
;
2072 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
2073 lck_mtx_unlock(&req
->r_mtx
);
2075 nfs_asyncio_finish(req
);
2077 nfs_request_rele(req
);
2078 lck_mtx_lock(&nmp
->nm_lock
);
2080 if (nfs_mount_check_dead_timeout(nmp
)) {
2081 nfs_mount_make_zombie(nmp
);
2085 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
2088 /* check monitored nodes, if necessary/possible */
2089 if (!LIST_EMPTY(&nmp
->nm_monlist
)) {
2090 nmp
->nm_state
|= NFSSTA_MONITOR_SCAN
;
2091 LIST_FOREACH(np
, &nmp
->nm_monlist
, n_monlink
) {
2092 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
2093 (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
| NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2096 np
->n_mflag
|= NMMONSCANINPROG
;
2097 lck_mtx_unlock(&nmp
->nm_lock
);
2098 error
= nfs_getattr(np
, NULL
, vfs_context_kernel(), (NGA_UNCACHED
| NGA_MONITOR
));
2099 if (!error
&& ISSET(np
->n_flag
, NUPDATESIZE
)) { /* update quickly to avoid multiple events */
2100 nfs_data_update_size(np
, 0);
2102 lck_mtx_lock(&nmp
->nm_lock
);
2103 np
->n_mflag
&= ~NMMONSCANINPROG
;
2104 if (np
->n_mflag
& NMMONSCANWANT
) {
2105 np
->n_mflag
&= ~NMMONSCANWANT
;
2106 wakeup(&np
->n_mflag
);
2108 if (error
|| !(nmp
->nm_sockflags
& NMSOCK_READY
) ||
2109 (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
| NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2113 nmp
->nm_state
&= ~NFSSTA_MONITOR_SCAN
;
2114 if (nmp
->nm_state
& NFSSTA_UNMOUNTING
) {
2115 wakeup(&nmp
->nm_state
); /* let unmounting thread know scan is done */
2118 if ((nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
))) {
2119 if (nmp
->nm_deadto_start
|| !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
2120 (nmp
->nm_state
& NFSSTA_RECOVER
)) {
2125 msleep(&nmp
->nm_sockthd
, &nmp
->nm_lock
, PSOCK
, "nfssockthread", &ts
);
2129 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2130 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
2131 (nmp
->nm_state
& NFSSTA_MOUNTED
) && NMFLAG(nmp
, CALLUMNT
) &&
2132 (nmp
->nm_vers
< NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2133 lck_mtx_unlock(&nmp
->nm_lock
);
2134 nfs3_umount_rpc(nmp
, vfs_context_kernel(),
2135 (nmp
->nm_sockflags
& NMSOCK_READY
) ? 6 : 2);
2136 lck_mtx_lock(&nmp
->nm_lock
);
2139 if (nmp
->nm_sockthd
== thd
) {
2140 nmp
->nm_sockthd
= NULL
;
2142 lck_mtx_unlock(&nmp
->nm_lock
);
2143 wakeup(&nmp
->nm_sockthd
);
2144 thread_terminate(thd
);
2147 /* start or wake a mount's socket thread */
2149 nfs_mount_sock_thread_wake(struct nfsmount
*nmp
)
2151 if (nmp
->nm_sockthd
) {
2152 wakeup(&nmp
->nm_sockthd
);
2153 } else if (kernel_thread_start(nfs_mount_sock_thread
, nmp
, &nmp
->nm_sockthd
) == KERN_SUCCESS
) {
2154 thread_deallocate(nmp
->nm_sockthd
);
2159 * Check if we should mark the mount dead because the
2160 * unresponsive mount has reached the dead timeout.
2161 * (must be called with nmp locked)
2164 nfs_mount_check_dead_timeout(struct nfsmount
*nmp
)
2168 if (nmp
->nm_state
& NFSSTA_DEAD
) {
2171 if (nmp
->nm_deadto_start
== 0) {
2174 nfs_is_squishy(nmp
);
2175 if (nmp
->nm_curdeadtimeout
<= 0) {
2179 if ((now
.tv_sec
- nmp
->nm_deadto_start
) < nmp
->nm_curdeadtimeout
) {
2186 * Call nfs_mount_zombie to remove most of the
2187 * nfs state for the mount, and then ask to be forcibly unmounted.
2189 * Assumes the nfs mount structure lock nm_lock is held.
2193 nfs_mount_make_zombie(struct nfsmount
*nmp
)
2201 if (nmp
->nm_state
& NFSSTA_DEAD
) {
2205 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
2206 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
2207 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
2208 lck_mtx_unlock(&nmp
->nm_lock
);
2209 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
2210 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
2211 lck_mtx_lock(&nmp
->nm_lock
);
2216 * NFS callback channel socket state
2218 struct nfs_callback_socket
{
2219 TAILQ_ENTRY(nfs_callback_socket
) ncbs_link
;
2220 socket_t ncbs_so
; /* the socket */
2221 struct sockaddr_storage ncbs_saddr
; /* socket address */
2222 struct nfs_rpc_record_state ncbs_rrs
; /* RPC record parsing state */
2223 time_t ncbs_stamp
; /* last accessed at */
2224 uint32_t ncbs_flags
; /* see below */
2226 #define NCBSOCK_UPCALL 0x0001
2227 #define NCBSOCK_UPCALLWANT 0x0002
2228 #define NCBSOCK_DEAD 0x0004
2231 * NFS callback channel state
2233 * One listening socket for accepting socket connections from servers and
2234 * a list of connected sockets to handle callback requests on.
2235 * Mounts registered with the callback channel are assigned IDs and
2236 * put on a list so that the callback request handling code can match
2237 * the requests up with mounts.
2239 socket_t nfs4_cb_so
= NULL
;
2240 socket_t nfs4_cb_so6
= NULL
;
2241 in_port_t nfs4_cb_port
= 0;
2242 in_port_t nfs4_cb_port6
= 0;
2243 uint32_t nfs4_cb_id
= 0;
2244 uint32_t nfs4_cb_so_usecount
= 0;
2245 TAILQ_HEAD(nfs4_cb_sock_list
, nfs_callback_socket
) nfs4_cb_socks
;
2246 TAILQ_HEAD(nfs4_cb_mount_list
, nfsmount
) nfs4_cb_mounts
;
2248 int nfs4_cb_handler(struct nfs_callback_socket
*, mbuf_t
);
2251 * Set up the callback channel for the NFS mount.
2253 * Initializes the callback channel socket state and
2254 * assigns a callback ID to the mount.
2257 nfs4_mount_callback_setup(struct nfsmount
*nmp
)
2259 struct sockaddr_in sin
;
2260 struct sockaddr_in6 sin6
;
2262 socket_t so6
= NULL
;
2263 struct timeval timeo
;
2267 lck_mtx_lock(nfs_global_mutex
);
2268 if (nfs4_cb_id
== 0) {
2269 TAILQ_INIT(&nfs4_cb_mounts
);
2270 TAILQ_INIT(&nfs4_cb_socks
);
2273 nmp
->nm_cbid
= nfs4_cb_id
++;
2274 if (nmp
->nm_cbid
== 0) {
2275 nmp
->nm_cbid
= nfs4_cb_id
++;
2277 nfs4_cb_so_usecount
++;
2278 TAILQ_INSERT_HEAD(&nfs4_cb_mounts
, nmp
, nm_cblink
);
2281 lck_mtx_unlock(nfs_global_mutex
);
2286 error
= sock_socket(AF_INET
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so
);
2288 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv4 socket\n", error
);
2293 sock_setsockopt(so
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2294 sin
.sin_len
= sizeof(struct sockaddr_in
);
2295 sin
.sin_family
= AF_INET
;
2296 sin
.sin_addr
.s_addr
= htonl(INADDR_ANY
);
2297 sin
.sin_port
= htons(nfs_callback_port
); /* try to use specified port */
2298 error
= sock_bind(so
, (struct sockaddr
*)&sin
);
2300 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv4 socket\n", error
);
2303 error
= sock_getsockname(so
, (struct sockaddr
*)&sin
, sin
.sin_len
);
2305 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv4 socket port\n", error
);
2308 nfs4_cb_port
= ntohs(sin
.sin_port
);
2310 error
= sock_listen(so
, 32);
2312 log(LOG_INFO
, "nfs callback setup: error %d on IPv4 listen\n", error
);
2316 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2319 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2321 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error
);
2323 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2325 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error
);
2327 sock_setsockopt(so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2328 sock_setsockopt(so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2329 sock_setsockopt(so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2333 error
= sock_socket(AF_INET6
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so6
);
2335 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv6 socket\n", error
);
2340 sock_setsockopt(so6
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2341 sock_setsockopt(so6
, IPPROTO_IPV6
, IPV6_V6ONLY
, &on
, sizeof(on
));
2342 /* try to use specified port or same port as IPv4 */
2343 port
= nfs_callback_port
? nfs_callback_port
: nfs4_cb_port
;
2345 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2346 sin6
.sin6_family
= AF_INET6
;
2347 sin6
.sin6_addr
= in6addr_any
;
2348 sin6
.sin6_port
= htons(port
);
2349 error
= sock_bind(so6
, (struct sockaddr
*)&sin6
);
2351 if (port
!= nfs_callback_port
) {
2352 /* if we simply tried to match the IPv4 port, then try any port */
2354 goto ipv6_bind_again
;
2356 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv6 socket\n", error
);
2359 error
= sock_getsockname(so6
, (struct sockaddr
*)&sin6
, sin6
.sin6_len
);
2361 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv6 socket port\n", error
);
2364 nfs4_cb_port6
= ntohs(sin6
.sin6_port
);
2366 error
= sock_listen(so6
, 32);
2368 log(LOG_INFO
, "nfs callback setup: error %d on IPv6 listen\n", error
);
2372 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2375 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2377 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error
);
2379 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2381 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error
);
2383 sock_setsockopt(so6
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2384 sock_setsockopt(so6
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2385 sock_setsockopt(so6
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2390 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2391 lck_mtx_unlock(nfs_global_mutex
);
2393 sock_shutdown(so
, SHUT_RDWR
);
2397 sock_shutdown(so6
, SHUT_RDWR
);
2401 lck_mtx_unlock(nfs_global_mutex
);
2406 * Shut down the callback channel for the NFS mount.
2408 * Clears the mount's callback ID and releases the mounts
2409 * reference on the callback socket. Last reference dropped
2410 * will also shut down the callback socket(s).
2413 nfs4_mount_callback_shutdown(struct nfsmount
*nmp
)
2415 struct nfs_callback_socket
*ncbsp
;
2417 struct nfs4_cb_sock_list cb_socks
;
2418 struct timespec ts
= {1, 0};
2420 lck_mtx_lock(nfs_global_mutex
);
2421 TAILQ_REMOVE(&nfs4_cb_mounts
, nmp
, nm_cblink
);
2422 /* wait for any callbacks in progress to complete */
2423 while (nmp
->nm_cbrefs
) {
2424 msleep(&nmp
->nm_cbrefs
, nfs_global_mutex
, PSOCK
, "cbshutwait", &ts
);
2427 if (--nfs4_cb_so_usecount
) {
2428 lck_mtx_unlock(nfs_global_mutex
);
2433 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2434 TAILQ_INIT(&cb_socks
);
2435 TAILQ_CONCAT(&cb_socks
, &nfs4_cb_socks
, ncbs_link
);
2436 lck_mtx_unlock(nfs_global_mutex
);
2438 sock_shutdown(so
, SHUT_RDWR
);
2442 sock_shutdown(so6
, SHUT_RDWR
);
2445 while ((ncbsp
= TAILQ_FIRST(&cb_socks
))) {
2446 TAILQ_REMOVE(&cb_socks
, ncbsp
, ncbs_link
);
2447 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2448 sock_close(ncbsp
->ncbs_so
);
2449 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2450 FREE(ncbsp
, M_TEMP
);
2455 * Check periodically for stale/unused nfs callback sockets
2457 #define NFS4_CB_TIMER_PERIOD 30
2458 #define NFS4_CB_IDLE_MAX 300
2460 nfs4_callback_timer(__unused
void *param0
, __unused
void *param1
)
2462 struct nfs_callback_socket
*ncbsp
, *nextncbsp
;
2466 lck_mtx_lock(nfs_global_mutex
);
2467 if (TAILQ_EMPTY(&nfs4_cb_socks
)) {
2468 nfs4_callback_timer_on
= 0;
2469 lck_mtx_unlock(nfs_global_mutex
);
2473 TAILQ_FOREACH_SAFE(ncbsp
, &nfs4_cb_socks
, ncbs_link
, nextncbsp
) {
2474 if (!(ncbsp
->ncbs_flags
& NCBSOCK_DEAD
) &&
2475 (now
.tv_sec
< (ncbsp
->ncbs_stamp
+ NFS4_CB_IDLE_MAX
))) {
2478 TAILQ_REMOVE(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2479 lck_mtx_unlock(nfs_global_mutex
);
2480 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2481 sock_close(ncbsp
->ncbs_so
);
2482 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2483 FREE(ncbsp
, M_TEMP
);
2486 nfs4_callback_timer_on
= 1;
2487 nfs_interval_timer_start(nfs4_callback_timer_call
,
2488 NFS4_CB_TIMER_PERIOD
* 1000);
2489 lck_mtx_unlock(nfs_global_mutex
);
2493 * Accept a new callback socket.
2496 nfs4_cb_accept(socket_t so
, __unused
void *arg
, __unused
int waitflag
)
2498 socket_t newso
= NULL
;
2499 struct nfs_callback_socket
*ncbsp
;
2500 struct nfsmount
*nmp
;
2501 struct timeval timeo
, now
;
2502 int error
, on
= 1, ip
;
2504 if (so
== nfs4_cb_so
) {
2506 } else if (so
== nfs4_cb_so6
) {
2512 /* allocate/initialize a new nfs_callback_socket */
2513 MALLOC(ncbsp
, struct nfs_callback_socket
*, sizeof(struct nfs_callback_socket
), M_TEMP
, M_WAITOK
);
2515 log(LOG_ERR
, "nfs callback accept: no memory for new socket\n");
2518 bzero(ncbsp
, sizeof(*ncbsp
));
2519 ncbsp
->ncbs_saddr
.ss_len
= (ip
== 4) ? sizeof(struct sockaddr_in
) : sizeof(struct sockaddr_in6
);
2520 nfs_rpc_record_state_init(&ncbsp
->ncbs_rrs
);
2522 /* accept a new socket */
2523 error
= sock_accept(so
, (struct sockaddr
*)&ncbsp
->ncbs_saddr
,
2524 ncbsp
->ncbs_saddr
.ss_len
, MSG_DONTWAIT
,
2525 nfs4_cb_rcv
, ncbsp
, &newso
);
2527 log(LOG_INFO
, "nfs callback accept: error %d accepting IPv%d socket\n", error
, ip
);
2528 FREE(ncbsp
, M_TEMP
);
2532 /* set up the new socket */
2533 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2536 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2538 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error
, ip
);
2540 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2542 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error
, ip
);
2544 sock_setsockopt(newso
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2545 sock_setsockopt(newso
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2546 sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2547 sock_setsockopt(newso
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2549 ncbsp
->ncbs_so
= newso
;
2551 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2553 lck_mtx_lock(nfs_global_mutex
);
2555 /* add it to the list */
2556 TAILQ_INSERT_HEAD(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2558 /* verify it's from a host we have mounted */
2559 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2560 /* check if socket's source address matches this mount's server address */
2561 if (!nmp
->nm_saddr
) {
2564 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0) {
2568 if (!nmp
) { /* we don't want this socket, mark it dead */
2569 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2572 /* make sure the callback socket cleanup timer is running */
2573 /* (shorten the timer if we've got a socket we don't want) */
2574 if (!nfs4_callback_timer_on
) {
2575 nfs4_callback_timer_on
= 1;
2576 nfs_interval_timer_start(nfs4_callback_timer_call
,
2577 !nmp
? 500 : (NFS4_CB_TIMER_PERIOD
* 1000));
2578 } else if (!nmp
&& (nfs4_callback_timer_on
< 2)) {
2579 nfs4_callback_timer_on
= 2;
2580 thread_call_cancel(nfs4_callback_timer_call
);
2581 nfs_interval_timer_start(nfs4_callback_timer_call
, 500);
2584 lck_mtx_unlock(nfs_global_mutex
);
2588 * Receive mbufs from callback sockets into RPC records and process each record.
2589 * Detect connection has been closed and shut down.
2592 nfs4_cb_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
2594 struct nfs_callback_socket
*ncbsp
= arg
;
2595 struct timespec ts
= {1, 0};
2598 int error
= 0, recv
= 1;
2600 lck_mtx_lock(nfs_global_mutex
);
2601 while (ncbsp
->ncbs_flags
& NCBSOCK_UPCALL
) {
2602 /* wait if upcall is already in progress */
2603 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALLWANT
;
2604 msleep(ncbsp
, nfs_global_mutex
, PSOCK
, "cbupcall", &ts
);
2606 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALL
;
2607 lck_mtx_unlock(nfs_global_mutex
);
2609 /* loop while we make error-free progress */
2610 while (!error
&& recv
) {
2611 error
= nfs_rpc_record_read(so
, &ncbsp
->ncbs_rrs
, MSG_DONTWAIT
, &recv
, &m
);
2612 if (m
) { /* handle the request */
2613 error
= nfs4_cb_handler(ncbsp
, m
);
2617 /* note: no error and no data indicates server closed its end */
2618 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
2620 * Socket is either being closed or should be.
2621 * We can't close the socket in the context of the upcall.
2622 * So we mark it as dead and leave it for the cleanup timer to reap.
2624 ncbsp
->ncbs_stamp
= 0;
2625 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2628 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2631 lck_mtx_lock(nfs_global_mutex
);
2632 ncbsp
->ncbs_flags
&= ~NCBSOCK_UPCALL
;
2633 lck_mtx_unlock(nfs_global_mutex
);
2638 * Handle an NFS callback channel request.
2641 nfs4_cb_handler(struct nfs_callback_socket
*ncbsp
, mbuf_t mreq
)
2643 socket_t so
= ncbsp
->ncbs_so
;
2644 struct nfsm_chain nmreq
, nmrep
;
2645 mbuf_t mhead
= NULL
, mrest
= NULL
, m
;
2647 struct nfsmount
*nmp
;
2650 nfs_stateid stateid
;
2651 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], rbitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
, truncate
, attrbytes
;
2652 uint32_t val
, xid
, procnum
, taglen
, cbid
, numops
, op
, status
;
2653 uint32_t auth_type
, auth_len
;
2654 uint32_t numres
, *pnumres
;
2655 int error
= 0, replen
, len
;
2658 xid
= numops
= op
= status
= procnum
= taglen
= cbid
= 0;
2660 nfsm_chain_dissect_init(error
, &nmreq
, mreq
);
2661 nfsm_chain_get_32(error
, &nmreq
, xid
); // RPC XID
2662 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Call
2663 nfsm_assert(error
, (val
== RPC_CALL
), EBADRPC
);
2664 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Version
2665 nfsm_assert(error
, (val
== RPC_VER2
), ERPCMISMATCH
);
2666 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Program Number
2667 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG
), EPROGUNAVAIL
);
2668 nfsm_chain_get_32(error
, &nmreq
, val
); // NFS Callback Program Version Number
2669 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG_VERSION
), EPROGMISMATCH
);
2670 nfsm_chain_get_32(error
, &nmreq
, procnum
); // NFS Callback Procedure Number
2671 nfsm_assert(error
, (procnum
<= NFSPROC4_CB_COMPOUND
), EPROCUNAVAIL
);
2673 /* Handle authentication */
2674 /* XXX just ignore auth for now - handling kerberos may be tricky */
2675 nfsm_chain_get_32(error
, &nmreq
, auth_type
); // RPC Auth Flavor
2676 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // RPC Auth Length
2677 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2678 if (!error
&& (auth_len
> 0)) {
2679 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2681 nfsm_chain_adv(error
, &nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
2682 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // verifier length
2683 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2684 if (!error
&& (auth_len
> 0)) {
2685 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2694 case NFSPROC4_CB_NULL
:
2695 status
= NFSERR_RETVOID
;
2697 case NFSPROC4_CB_COMPOUND
:
2698 /* tag, minorversion, cb ident, numops, op array */
2699 nfsm_chain_get_32(error
, &nmreq
, taglen
); /* tag length */
2700 nfsm_assert(error
, (val
<= NFS4_OPAQUE_LIMIT
), EBADRPC
);
2702 /* start building the body of the response */
2703 nfsm_mbuf_get(error
, &mrest
, nfsm_rndup(taglen
) + 5 * NFSX_UNSIGNED
);
2704 nfsm_chain_init(&nmrep
, mrest
);
2706 /* copy tag from request to response */
2707 nfsm_chain_add_32(error
, &nmrep
, taglen
); /* tag length */
2708 for (len
= (int)taglen
; !error
&& (len
> 0); len
-= NFSX_UNSIGNED
) {
2709 nfsm_chain_get_32(error
, &nmreq
, val
);
2710 nfsm_chain_add_32(error
, &nmrep
, val
);
2713 /* insert number of results placeholder */
2715 nfsm_chain_add_32(error
, &nmrep
, numres
);
2716 pnumres
= (uint32_t*)(nmrep
.nmc_ptr
- NFSX_UNSIGNED
);
2718 nfsm_chain_get_32(error
, &nmreq
, val
); /* minorversion */
2719 nfsm_assert(error
, (val
== 0), NFSERR_MINOR_VERS_MISMATCH
);
2720 nfsm_chain_get_32(error
, &nmreq
, cbid
); /* callback ID */
2721 nfsm_chain_get_32(error
, &nmreq
, numops
); /* number of operations */
2723 if ((error
== EBADRPC
) || (error
== NFSERR_MINOR_VERS_MISMATCH
)) {
2725 } else if ((error
== ENOBUFS
) || (error
== ENOMEM
)) {
2726 status
= NFSERR_RESOURCE
;
2728 status
= NFSERR_SERVERFAULT
;
2731 nfsm_chain_null(&nmrep
);
2734 /* match the callback ID to a registered mount */
2735 lck_mtx_lock(nfs_global_mutex
);
2736 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2737 if (nmp
->nm_cbid
!= cbid
) {
2740 /* verify socket's source address matches this mount's server address */
2741 if (!nmp
->nm_saddr
) {
2744 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0) {
2748 /* mark the NFS mount as busy */
2752 lck_mtx_unlock(nfs_global_mutex
);
2754 /* if no mount match, just drop socket. */
2756 nfsm_chain_null(&nmrep
);
2760 /* process ops, adding results to mrest */
2761 while (numops
> 0) {
2763 nfsm_chain_get_32(error
, &nmreq
, op
);
2768 case NFS_OP_CB_GETATTR
:
2769 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2771 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2772 bmlen
= NFS_ATTR_BITMAP_LEN
;
2773 nfsm_chain_get_bitmap(error
, &nmreq
, bitmap
, bmlen
);
2777 numops
= 0; /* don't process any more ops */
2779 /* find the node for the file handle */
2780 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2782 status
= NFSERR_BADHANDLE
;
2785 numops
= 0; /* don't process any more ops */
2788 nfsm_chain_add_32(error
, &nmrep
, op
);
2789 nfsm_chain_add_32(error
, &nmrep
, status
);
2790 if (!error
&& (status
== EBADRPC
)) {
2794 /* only allow returning size, change, and mtime attrs */
2795 NFS_CLEAR_ATTRIBUTES(&rbitmap
);
2797 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
2798 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_CHANGE
);
2799 attrbytes
+= 2 * NFSX_UNSIGNED
;
2801 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
2802 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_SIZE
);
2803 attrbytes
+= 2 * NFSX_UNSIGNED
;
2805 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2806 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_TIME_MODIFY
);
2807 attrbytes
+= 3 * NFSX_UNSIGNED
;
2809 nfsm_chain_add_bitmap(error
, &nmrep
, rbitmap
, NFS_ATTR_BITMAP_LEN
);
2810 nfsm_chain_add_32(error
, &nmrep
, attrbytes
);
2811 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
2812 nfsm_chain_add_64(error
, &nmrep
,
2813 np
->n_vattr
.nva_change
+ ((np
->n_flag
& NMODIFIED
) ? 1 : 0));
2815 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
2816 nfsm_chain_add_64(error
, &nmrep
, np
->n_size
);
2818 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2819 nfsm_chain_add_64(error
, &nmrep
, np
->n_vattr
.nva_timesec
[NFSTIME_MODIFY
]);
2820 nfsm_chain_add_32(error
, &nmrep
, np
->n_vattr
.nva_timensec
[NFSTIME_MODIFY
]);
2822 nfs_node_unlock(np
);
2823 vnode_put(NFSTOV(np
));
2827 * If we hit an error building the reply, we can't easily back up.
2828 * So we'll just update the status and hope the server ignores the
2832 case NFS_OP_CB_RECALL
:
2833 // (STATEID, TRUNCATE, FH) -> (STATUS)
2835 nfsm_chain_get_stateid(error
, &nmreq
, &stateid
);
2836 nfsm_chain_get_32(error
, &nmreq
, truncate
);
2837 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2841 numops
= 0; /* don't process any more ops */
2843 /* find the node for the file handle */
2844 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2846 status
= NFSERR_BADHANDLE
;
2849 numops
= 0; /* don't process any more ops */
2850 } else if (!(np
->n_openflags
& N_DELEG_MASK
) ||
2851 bcmp(&np
->n_dstateid
, &stateid
, sizeof(stateid
))) {
2852 /* delegation stateid state doesn't match */
2853 status
= NFSERR_BAD_STATEID
;
2854 numops
= 0; /* don't process any more ops */
2856 if (!status
) { /* add node to recall queue, and wake socket thread */
2857 nfs4_delegation_return_enqueue(np
);
2860 nfs_node_unlock(np
);
2861 vnode_put(NFSTOV(np
));
2864 nfsm_chain_add_32(error
, &nmrep
, op
);
2865 nfsm_chain_add_32(error
, &nmrep
, status
);
2866 if (!error
&& (status
== EBADRPC
)) {
2870 case NFS_OP_CB_ILLEGAL
:
2872 nfsm_chain_add_32(error
, &nmrep
, NFS_OP_CB_ILLEGAL
);
2873 status
= NFSERR_OP_ILLEGAL
;
2874 nfsm_chain_add_32(error
, &nmrep
, status
);
2875 numops
= 0; /* don't process any more ops */
2881 if (!status
&& error
) {
2882 if (error
== EBADRPC
) {
2884 } else if ((error
== ENOBUFS
) || (error
== ENOMEM
)) {
2885 status
= NFSERR_RESOURCE
;
2887 status
= NFSERR_SERVERFAULT
;
2892 /* Now, set the numres field */
2893 *pnumres
= txdr_unsigned(numres
);
2894 nfsm_chain_build_done(error
, &nmrep
);
2895 nfsm_chain_null(&nmrep
);
2897 /* drop the callback reference on the mount */
2898 lck_mtx_lock(nfs_global_mutex
);
2900 if (!nmp
->nm_cbid
) {
2901 wakeup(&nmp
->nm_cbrefs
);
2903 lck_mtx_unlock(nfs_global_mutex
);
2908 if (status
== EBADRPC
) {
2909 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
2912 /* build reply header */
2913 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mhead
);
2914 nfsm_chain_init(&nmrep
, mhead
);
2915 nfsm_chain_add_32(error
, &nmrep
, 0); /* insert space for an RPC record mark */
2916 nfsm_chain_add_32(error
, &nmrep
, xid
);
2917 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
2918 if ((status
== ERPCMISMATCH
) || (status
& NFSERR_AUTHERR
)) {
2919 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
2920 if (status
& NFSERR_AUTHERR
) {
2921 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
2922 nfsm_chain_add_32(error
, &nmrep
, (status
& ~NFSERR_AUTHERR
));
2924 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
2925 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2926 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2930 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
2931 /* XXX RPCAUTH_NULL verifier */
2932 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
2933 nfsm_chain_add_32(error
, &nmrep
, 0);
2934 /* accepted status */
2937 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
2940 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
2941 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2942 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2945 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
2948 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
2951 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
2952 if (status
!= NFSERR_RETVOID
) {
2953 nfsm_chain_add_32(error
, &nmrep
, status
);
2958 nfsm_chain_build_done(error
, &nmrep
);
2960 nfsm_chain_null(&nmrep
);
2963 error
= mbuf_setnext(nmrep
.nmc_mcur
, mrest
);
2965 printf("nfs cb: mbuf_setnext failed %d\n", error
);
2969 /* Calculate the size of the reply */
2971 for (m
= nmrep
.nmc_mhead
; m
; m
= mbuf_next(m
)) {
2972 replen
+= mbuf_len(m
);
2974 mbuf_pkthdr_setlen(mhead
, replen
);
2975 error
= mbuf_pkthdr_setrcvif(mhead
, NULL
);
2976 nfsm_chain_set_recmark(error
, &nmrep
, (replen
- NFSX_UNSIGNED
) | 0x80000000);
2977 nfsm_chain_null(&nmrep
);
2979 /* send the reply */
2980 bzero(&msg
, sizeof(msg
));
2981 error
= sock_sendmbuf(so
, &msg
, mhead
, 0, &sentlen
);
2983 if (!error
&& ((int)sentlen
!= replen
)) {
2984 error
= EWOULDBLOCK
;
2986 if (error
== EWOULDBLOCK
) { /* inability to send response is considered fatal */
2991 nfsm_chain_cleanup(&nmrep
);
3007 * Initialize an nfs_rpc_record_state structure.
3010 nfs_rpc_record_state_init(struct nfs_rpc_record_state
*nrrsp
)
3012 bzero(nrrsp
, sizeof(*nrrsp
));
3013 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3017 * Clean up an nfs_rpc_record_state structure.
3020 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state
*nrrsp
)
3022 if (nrrsp
->nrrs_m
) {
3023 mbuf_freem(nrrsp
->nrrs_m
);
3024 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3029 * Read the next (marked) RPC record from the socket.
3031 * *recvp returns if any data was received.
3032 * *mp returns the next complete RPC record
3035 nfs_rpc_record_read(socket_t so
, struct nfs_rpc_record_state
*nrrsp
, int flags
, int *recvp
, mbuf_t
*mp
)
3046 /* read the TCP RPC record marker */
3047 while (!error
&& nrrsp
->nrrs_markerleft
) {
3048 aio
.iov_base
= ((char*)&nrrsp
->nrrs_fragleft
+
3049 sizeof(nrrsp
->nrrs_fragleft
) - nrrsp
->nrrs_markerleft
);
3050 aio
.iov_len
= nrrsp
->nrrs_markerleft
;
3051 bzero(&msg
, sizeof(msg
));
3054 error
= sock_receive(so
, &msg
, flags
, &rcvlen
);
3055 if (error
|| !rcvlen
) {
3059 nrrsp
->nrrs_markerleft
-= rcvlen
;
3060 if (nrrsp
->nrrs_markerleft
) {
3063 /* record marker complete */
3064 nrrsp
->nrrs_fragleft
= ntohl(nrrsp
->nrrs_fragleft
);
3065 if (nrrsp
->nrrs_fragleft
& 0x80000000) {
3066 nrrsp
->nrrs_lastfrag
= 1;
3067 nrrsp
->nrrs_fragleft
&= ~0x80000000;
3069 nrrsp
->nrrs_reclen
+= nrrsp
->nrrs_fragleft
;
3070 if (nrrsp
->nrrs_reclen
> NFS_MAXPACKET
) {
3071 /* This is SERIOUS! We are out of sync with the sender. */
3072 log(LOG_ERR
, "impossible RPC record length (%d) on callback", nrrsp
->nrrs_reclen
);
3077 /* read the TCP RPC record fragment */
3078 while (!error
&& !nrrsp
->nrrs_markerleft
&& nrrsp
->nrrs_fragleft
) {
3080 rcvlen
= nrrsp
->nrrs_fragleft
;
3081 error
= sock_receivembuf(so
, NULL
, &m
, flags
, &rcvlen
);
3082 if (error
|| !rcvlen
|| !m
) {
3086 /* append mbufs to list */
3087 nrrsp
->nrrs_fragleft
-= rcvlen
;
3088 if (!nrrsp
->nrrs_m
) {
3091 error
= mbuf_setnext(nrrsp
->nrrs_mlast
, m
);
3093 printf("nfs tcp rcv: mbuf_setnext failed %d\n", error
);
3098 while (mbuf_next(m
)) {
3101 nrrsp
->nrrs_mlast
= m
;
3104 /* done reading fragment? */
3105 if (!error
&& !nrrsp
->nrrs_markerleft
&& !nrrsp
->nrrs_fragleft
) {
3106 /* reset socket fragment parsing state */
3107 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3108 if (nrrsp
->nrrs_lastfrag
) {
3109 /* RPC record complete */
3110 *mp
= nrrsp
->nrrs_m
;
3111 /* reset socket record parsing state */
3112 nrrsp
->nrrs_reclen
= 0;
3113 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3114 nrrsp
->nrrs_lastfrag
= 0;
3124 * The NFS client send routine.
3126 * Send the given NFS request out the mount's socket.
3127 * Holds nfs_sndlock() for the duration of this call.
3129 * - check for request termination (sigintr)
3130 * - wait for reconnect, if necessary
3131 * - UDP: check the congestion window
3132 * - make a copy of the request to send
3133 * - UDP: update the congestion window
3134 * - send the request
3136 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3137 * rexmit count is also updated if this isn't the first send.
3139 * If the send is not successful, make sure R_MUSTRESEND is set.
3140 * If this wasn't the first transmit, set R_RESENDERR.
3141 * Also, undo any UDP congestion window changes made.
3143 * If the error appears to indicate that the socket should
3144 * be reconnected, mark the socket for reconnection.
3146 * Only return errors when the request should be aborted.
3149 nfs_send(struct nfsreq
*req
, int wait
)
3151 struct nfsmount
*nmp
;
3152 struct nfs_socket
*nso
;
3153 int error
, error2
, sotype
, rexmit
, slpflag
= 0, needrecon
;
3155 struct sockaddr
*sendnam
;
3158 struct timespec ts
= { 2, 0 };
3161 error
= nfs_sndlock(req
);
3163 lck_mtx_lock(&req
->r_mtx
);
3164 req
->r_error
= error
;
3165 req
->r_flags
&= ~R_SENDING
;
3166 lck_mtx_unlock(&req
->r_mtx
);
3170 error
= nfs_sigintr(req
->r_nmp
, req
, NULL
, 0);
3173 lck_mtx_lock(&req
->r_mtx
);
3174 req
->r_error
= error
;
3175 req
->r_flags
&= ~R_SENDING
;
3176 lck_mtx_unlock(&req
->r_mtx
);
3180 sotype
= nmp
->nm_sotype
;
3183 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3184 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3186 if (((req
->r_flags
& R_SETUP
) && !(nmp
->nm_sockflags
& NMSOCK_SETUP
)) ||
3187 ((req
->r_flags
& R_RECOVER
) && !(nmp
->nm_sockflags
& NMSOCK_READY
))) {
3190 lck_mtx_lock(&req
->r_mtx
);
3191 req
->r_error
= error
;
3192 req
->r_flags
&= ~R_SENDING
;
3193 lck_mtx_unlock(&req
->r_mtx
);
3197 /* If the socket needs reconnection, do that now. */
3198 /* wait until socket is ready - unless this request is part of setup */
3199 lck_mtx_lock(&nmp
->nm_lock
);
3200 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) &&
3201 !((nmp
->nm_sockflags
& NMSOCK_SETUP
) && (req
->r_flags
& R_SETUP
))) {
3202 if (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) {
3205 lck_mtx_unlock(&nmp
->nm_lock
);
3208 lck_mtx_lock(&req
->r_mtx
);
3209 req
->r_flags
&= ~R_SENDING
;
3210 req
->r_flags
|= R_MUSTRESEND
;
3212 lck_mtx_unlock(&req
->r_mtx
);
3215 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req
->r_xid
);
3216 lck_mtx_lock(&req
->r_mtx
);
3217 req
->r_flags
&= ~R_MUSTRESEND
;
3219 lck_mtx_unlock(&req
->r_mtx
);
3220 lck_mtx_lock(&nmp
->nm_lock
);
3221 while (!(nmp
->nm_sockflags
& NMSOCK_READY
)) {
3222 /* don't bother waiting if the socket thread won't be reconnecting it */
3223 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
3227 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (nmp
->nm_reconnect_start
> 0)) {
3230 if ((now
.tv_sec
- nmp
->nm_reconnect_start
) >= 8) {
3231 /* soft mount in reconnect for a while... terminate ASAP */
3232 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
3233 req
->r_flags
|= R_SOFTTERM
;
3234 req
->r_error
= error
= ETIMEDOUT
;
3238 /* make sure socket thread is running, then wait */
3239 nfs_mount_sock_thread_wake(nmp
);
3240 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3243 msleep(req
, &nmp
->nm_lock
, slpflag
| PSOCK
, "nfsconnectwait", &ts
);
3246 lck_mtx_unlock(&nmp
->nm_lock
);
3248 lck_mtx_lock(&req
->r_mtx
);
3249 req
->r_error
= error
;
3250 req
->r_flags
&= ~R_SENDING
;
3251 lck_mtx_unlock(&req
->r_mtx
);
3257 /* note that we're using the mount's socket to do the send */
3258 nmp
->nm_state
|= NFSSTA_SENDING
; /* will be cleared by nfs_sndunlock() */
3259 lck_mtx_unlock(&nmp
->nm_lock
);
3262 lck_mtx_lock(&req
->r_mtx
);
3263 req
->r_flags
&= ~R_SENDING
;
3264 req
->r_flags
|= R_MUSTRESEND
;
3266 lck_mtx_unlock(&req
->r_mtx
);
3270 lck_mtx_lock(&req
->r_mtx
);
3271 rexmit
= (req
->r_flags
& R_SENT
);
3273 if (sotype
== SOCK_DGRAM
) {
3274 lck_mtx_lock(&nmp
->nm_lock
);
3275 if (!(req
->r_flags
& R_CWND
) && (nmp
->nm_sent
>= nmp
->nm_cwnd
)) {
3276 /* if we can't send this out yet, wait on the cwnd queue */
3277 slpflag
= (NMFLAG(nmp
, INTR
) && req
->r_thread
) ? PCATCH
: 0;
3278 lck_mtx_unlock(&nmp
->nm_lock
);
3280 req
->r_flags
&= ~R_SENDING
;
3281 req
->r_flags
|= R_MUSTRESEND
;
3282 lck_mtx_unlock(&req
->r_mtx
);
3287 lck_mtx_lock(&nmp
->nm_lock
);
3288 while (nmp
->nm_sent
>= nmp
->nm_cwnd
) {
3289 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3292 TAILQ_INSERT_TAIL(&nmp
->nm_cwndq
, req
, r_cchain
);
3293 msleep(req
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfswaitcwnd", &ts
);
3295 if ((req
->r_cchain
.tqe_next
!= NFSREQNOLIST
)) {
3296 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3297 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3300 lck_mtx_unlock(&nmp
->nm_lock
);
3304 * We update these *before* the send to avoid racing
3305 * against others who may be looking to send requests.
3308 /* first transmit */
3309 req
->r_flags
|= R_CWND
;
3310 nmp
->nm_sent
+= NFS_CWNDSCALE
;
3313 * When retransmitting, turn timing off
3314 * and divide congestion window by 2.
3316 req
->r_flags
&= ~R_TIMING
;
3318 if (nmp
->nm_cwnd
< NFS_CWNDSCALE
) {
3319 nmp
->nm_cwnd
= NFS_CWNDSCALE
;
3322 lck_mtx_unlock(&nmp
->nm_lock
);
3325 req
->r_flags
&= ~R_MUSTRESEND
;
3326 lck_mtx_unlock(&req
->r_mtx
);
3328 error
= mbuf_copym(req
->r_mhead
, 0, MBUF_COPYALL
,
3329 wait
? MBUF_WAITOK
: MBUF_DONTWAIT
, &mreqcopy
);
3332 log(LOG_INFO
, "nfs_send: mbuf copy failed %d\n", error
);
3335 lck_mtx_lock(&req
->r_mtx
);
3336 req
->r_flags
&= ~R_SENDING
;
3337 req
->r_flags
|= R_MUSTRESEND
;
3339 lck_mtx_unlock(&req
->r_mtx
);
3343 bzero(&msg
, sizeof(msg
));
3344 if ((sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
) && ((sendnam
= nmp
->nm_saddr
))) {
3345 msg
.msg_name
= (caddr_t
)sendnam
;
3346 msg
.msg_namelen
= sendnam
->sa_len
;
3348 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreqcopy
, 0, &sentlen
);
3349 if (error
|| (sentlen
!= req
->r_mreqlen
)) {
3350 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
3351 req
->r_xid
, (int)sentlen
, (int)req
->r_mreqlen
, error
);
3354 if (!error
&& (sentlen
!= req
->r_mreqlen
)) {
3355 error
= EWOULDBLOCK
;
3357 needrecon
= ((sotype
== SOCK_STREAM
) && sentlen
&& (sentlen
!= req
->r_mreqlen
));
3359 lck_mtx_lock(&req
->r_mtx
);
3360 req
->r_flags
&= ~R_SENDING
;
3362 if (rexmit
&& (++req
->r_rexmit
> NFS_MAXREXMIT
)) {
3363 req
->r_rexmit
= NFS_MAXREXMIT
;
3368 req
->r_flags
&= ~R_RESENDERR
;
3370 OSAddAtomic64(1, &nfsstats
.rpcretries
);
3372 req
->r_flags
|= R_SENT
;
3373 if (req
->r_flags
& R_WAITSENT
) {
3374 req
->r_flags
&= ~R_WAITSENT
;
3378 lck_mtx_unlock(&req
->r_mtx
);
3383 req
->r_flags
|= R_MUSTRESEND
;
3385 req
->r_flags
|= R_RESENDERR
;
3387 if ((error
== EINTR
) || (error
== ERESTART
)) {
3388 req
->r_error
= error
;
3390 lck_mtx_unlock(&req
->r_mtx
);
3392 if (sotype
== SOCK_DGRAM
) {
3394 * Note: even though a first send may fail, we consider
3395 * the request sent for congestion window purposes.
3396 * So we don't need to undo any of the changes made above.
3399 * Socket errors ignored for connectionless sockets??
3400 * For now, ignore them all
3402 if ((error
!= EINTR
) && (error
!= ERESTART
) &&
3403 (error
!= EWOULDBLOCK
) && (error
!= EIO
) && (nso
== nmp
->nm_nso
)) {
3404 int clearerror
= 0, optlen
= sizeof(clearerror
);
3405 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &clearerror
, &optlen
);
3406 #ifdef NFS_SOCKET_DEBUGGING
3408 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
3415 /* check if it appears we should reconnect the socket */
3418 /* if send timed out, reconnect if on TCP */
3419 if (sotype
!= SOCK_STREAM
) {
3434 /* case ECANCELED??? */
3438 if (needrecon
&& (nso
== nmp
->nm_nso
)) { /* mark socket as needing reconnect */
3439 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req
->r_xid
, error
);
3440 nfs_need_reconnect(nmp
);
3445 if (nfs_is_dead(error
, nmp
)) {
3450 * Don't log some errors:
3451 * EPIPE errors may be common with servers that drop idle connections.
3452 * EADDRNOTAVAIL may occur on network transitions.
3453 * ENOTCONN may occur under some network conditions.
3455 if ((error
== EPIPE
) || (error
== EADDRNOTAVAIL
) || (error
== ENOTCONN
)) {
3458 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
3459 log(LOG_INFO
, "nfs send error %d for server %s\n", error
,
3460 !req
->r_nmp
? "<unmounted>" :
3461 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
);
3464 /* prefer request termination error over other errors */
3465 error2
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0);
3470 /* only allow the following errors to be returned */
3471 if ((error
!= EINTR
) && (error
!= ERESTART
) && (error
!= EIO
) &&
3472 (error
!= ENXIO
) && (error
!= ETIMEDOUT
)) {
3474 * We got some error we don't know what do do with,
3475 * i.e., we're not reconnecting, we map it to
3476 * EIO. Presumably our send failed and we better tell
3477 * the caller so they don't wait for a reply that is
3478 * never going to come. If we are reconnecting we
3479 * return 0 and the request will be resent.
3481 error
= needrecon
? 0 : EIO
;
3487 * NFS client socket upcalls
3489 * Pull RPC replies out of an NFS mount's socket and match them
3490 * up with the pending request.
3492 * The datagram code is simple because we always get whole
3493 * messages out of the socket.
3495 * The stream code is more involved because we have to parse
3496 * the RPC records out of the stream.
3499 /* NFS client UDP socket upcall */
3501 nfs_udp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3503 struct nfsmount
*nmp
= arg
;
3504 struct nfs_socket
*nso
= nmp
->nm_nso
;
3509 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3514 /* make sure we're on the current socket */
3515 if (!nso
|| (nso
->nso_so
!= so
)) {
3521 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
3523 nfs_request_match_reply(nmp
, m
);
3525 } while (m
&& !error
);
3527 if (error
&& (error
!= EWOULDBLOCK
)) {
3528 /* problems with the socket... mark for reconnection */
3529 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error
);
3530 nfs_need_reconnect(nmp
);
3534 /* NFS client TCP socket upcall */
3536 nfs_tcp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3538 struct nfsmount
*nmp
= arg
;
3539 struct nfs_socket
*nso
= nmp
->nm_nso
;
3540 struct nfs_rpc_record_state nrrs
;
3546 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3550 /* make sure we're on the current socket */
3551 lck_mtx_lock(&nmp
->nm_lock
);
3553 if (!nso
|| (nso
->nso_so
!= so
) || (nmp
->nm_sockflags
& (NMSOCK_DISCONNECTING
))) {
3554 lck_mtx_unlock(&nmp
->nm_lock
);
3557 lck_mtx_unlock(&nmp
->nm_lock
);
3559 /* make sure this upcall should be trying to do work */
3560 lck_mtx_lock(&nso
->nso_lock
);
3561 if (nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) {
3562 lck_mtx_unlock(&nso
->nso_lock
);
3565 nso
->nso_flags
|= NSO_UPCALL
;
3566 nrrs
= nso
->nso_rrs
;
3567 lck_mtx_unlock(&nso
->nso_lock
);
3569 /* loop while we make error-free progress */
3570 while (!error
&& recv
) {
3571 error
= nfs_rpc_record_read(so
, &nrrs
, MSG_DONTWAIT
, &recv
, &m
);
3572 if (m
) { /* match completed response with request */
3573 nfs_request_match_reply(nmp
, m
);
3577 /* Update the sockets's rpc parsing state */
3578 lck_mtx_lock(&nso
->nso_lock
);
3579 nso
->nso_rrs
= nrrs
;
3580 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
3583 nso
->nso_flags
&= ~NSO_UPCALL
;
3584 lck_mtx_unlock(&nso
->nso_lock
);
3586 wakeup(&nso
->nso_flags
);
3589 #ifdef NFS_SOCKET_DEBUGGING
3590 if (!recv
&& (error
!= EWOULDBLOCK
)) {
3591 NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error
);
3594 /* note: no error and no data indicates server closed its end */
3595 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
3596 /* problems with the socket... mark for reconnection */
3597 NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error
);
3598 nfs_need_reconnect(nmp
);
3603 * "poke" a socket to try to provoke any pending errors
3606 nfs_sock_poke(struct nfsmount
*nmp
)
3614 lck_mtx_lock(&nmp
->nm_lock
);
3615 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) ||
3616 !(nmp
->nm_sockflags
& NMSOCK_READY
) || !nmp
->nm_nso
|| !nmp
->nm_nso
->nso_so
) {
3617 /* Nothing to poke */
3618 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3619 wakeup(&nmp
->nm_sockflags
);
3620 lck_mtx_unlock(&nmp
->nm_lock
);
3623 lck_mtx_unlock(&nmp
->nm_lock
);
3624 aio
.iov_base
= &dummy
;
3627 bzero(&msg
, sizeof(msg
));
3630 error
= sock_send(nmp
->nm_nso
->nso_so
, &msg
, MSG_DONTWAIT
, &len
);
3631 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error
);
3632 lck_mtx_lock(&nmp
->nm_lock
);
3633 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3634 wakeup(&nmp
->nm_sockflags
);
3635 lck_mtx_unlock(&nmp
->nm_lock
);
3636 nfs_is_dead(error
, nmp
);
3640 * Match an RPC reply with the corresponding request
3643 nfs_request_match_reply(struct nfsmount
*nmp
, mbuf_t mrep
)
3646 struct nfsm_chain nmrep
;
3647 u_int32_t reply
= 0, rxid
= 0;
3648 int error
= 0, asyncioq
, t1
;
3650 /* Get the xid and check that it is an rpc reply */
3651 nfsm_chain_dissect_init(error
, &nmrep
, mrep
);
3652 nfsm_chain_get_32(error
, &nmrep
, rxid
);
3653 nfsm_chain_get_32(error
, &nmrep
, reply
);
3654 if (error
|| (reply
!= RPC_REPLY
)) {
3655 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
3661 * Loop through the request list to match up the reply
3662 * Iff no match, just drop it.
3664 lck_mtx_lock(nfs_request_mutex
);
3665 TAILQ_FOREACH(req
, &nfs_reqq
, r_chain
) {
3666 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3669 /* looks like we have it, grab lock and double check */
3670 lck_mtx_lock(&req
->r_mtx
);
3671 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3672 lck_mtx_unlock(&req
->r_mtx
);
3676 req
->r_nmrep
= nmrep
;
3677 lck_mtx_lock(&nmp
->nm_lock
);
3678 if (nmp
->nm_sotype
== SOCK_DGRAM
) {
3680 * Update congestion window.
3681 * Do the additive increase of one rpc/rtt.
3683 FSDBG(530, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3684 if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
3686 ((NFS_CWNDSCALE
* NFS_CWNDSCALE
) +
3687 (nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
3688 if (nmp
->nm_cwnd
> NFS_MAXCWND
) {
3689 nmp
->nm_cwnd
= NFS_MAXCWND
;
3692 if (req
->r_flags
& R_CWND
) {
3693 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3694 req
->r_flags
&= ~R_CWND
;
3696 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3697 /* congestion window is open, poke the cwnd queue */
3698 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3699 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3700 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3705 * Update rtt using a gain of 0.125 on the mean
3706 * and a gain of 0.25 on the deviation.
3708 if (req
->r_flags
& R_TIMING
) {
3710 * Since the timer resolution of
3711 * NFS_HZ is so course, it can often
3712 * result in r_rtt == 0. Since
3713 * r_rtt == N means that the actual
3714 * rtt is between N+dt and N+2-dt ticks,
3717 if (proct
[req
->r_procnum
] == 0) {
3718 panic("nfs_request_match_reply: proct[%d] is zero", req
->r_procnum
);
3720 t1
= req
->r_rtt
+ 1;
3721 t1
-= (NFS_SRTT(req
) >> 3);
3722 NFS_SRTT(req
) += t1
;
3726 t1
-= (NFS_SDRTT(req
) >> 2);
3727 NFS_SDRTT(req
) += t1
;
3729 nmp
->nm_timeouts
= 0;
3730 lck_mtx_unlock(&nmp
->nm_lock
);
3731 /* signal anyone waiting on this request */
3733 asyncioq
= (req
->r_callback
.rcb_func
!= NULL
);
3734 if (nfs_request_using_gss(req
)) {
3735 nfs_gss_clnt_rpcdone(req
);
3737 lck_mtx_unlock(&req
->r_mtx
);
3738 lck_mtx_unlock(nfs_request_mutex
);
3739 /* if it's an async RPC with a callback, queue it up */
3741 nfs_asyncio_finish(req
);
3747 /* not matched to a request, so drop it. */
3748 lck_mtx_unlock(nfs_request_mutex
);
3749 OSAddAtomic64(1, &nfsstats
.rpcunexpected
);
3755 * Wait for the reply for a given request...
3756 * ...potentially resending the request if necessary.
3759 nfs_wait_reply(struct nfsreq
*req
)
3761 struct timespec ts
= { 2, 0 };
3762 int error
= 0, slpflag
, first
= 1;
3764 if (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
3770 lck_mtx_lock(&req
->r_mtx
);
3771 while (!req
->r_nmrep
.nmc_mhead
) {
3772 if ((error
= nfs_sigintr(req
->r_nmp
, req
, first
? NULL
: req
->r_thread
, 0))) {
3775 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
3778 /* check if we need to resend */
3779 if (req
->r_flags
& R_MUSTRESEND
) {
3780 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3781 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
3782 req
->r_flags
|= R_SENDING
;
3783 lck_mtx_unlock(&req
->r_mtx
);
3784 if (nfs_request_using_gss(req
)) {
3786 * It's an RPCSEC_GSS request.
3787 * Can't just resend the original request
3788 * without bumping the cred sequence number.
3789 * Go back and re-build the request.
3791 lck_mtx_lock(&req
->r_mtx
);
3792 req
->r_flags
&= ~R_SENDING
;
3793 lck_mtx_unlock(&req
->r_mtx
);
3796 error
= nfs_send(req
, 1);
3797 lck_mtx_lock(&req
->r_mtx
);
3798 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3799 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
, error
);
3803 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
3807 /* need to poll if we're P_NOREMOTEHANG */
3808 if (nfs_noremotehang(req
->r_thread
)) {
3811 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitreply", &ts
);
3812 first
= slpflag
= 0;
3814 lck_mtx_unlock(&req
->r_mtx
);
3820 * An NFS request goes something like this:
3821 * (nb: always frees up mreq mbuf list)
3822 * nfs_request_create()
3823 * - allocates a request struct if one is not provided
3824 * - initial fill-in of the request struct
3825 * nfs_request_add_header()
3826 * - add the RPC header
3827 * nfs_request_send()
3828 * - link it into list
3829 * - call nfs_send() for first transmit
3830 * nfs_request_wait()
3831 * - call nfs_wait_reply() to wait for the reply
3832 * nfs_request_finish()
3833 * - break down rpc header and return with error or nfs reply
3834 * pointed to by nmrep.
3835 * nfs_request_rele()
3836 * nfs_request_destroy()
3837 * - clean up the request struct
3838 * - free the request struct if it was allocated by nfs_request_create()
3842 * Set up an NFS request struct (allocating if no request passed in).
3847 mount_t mp
, /* used only if !np */
3848 struct nfsm_chain
*nmrest
,
3852 struct nfsreq
**reqp
)
3854 struct nfsreq
*req
, *newreq
= NULL
;
3855 struct nfsmount
*nmp
;
3859 /* allocate a new NFS request structure */
3860 MALLOC_ZONE(newreq
, struct nfsreq
*, sizeof(*newreq
), M_NFSREQ
, M_WAITOK
);
3862 mbuf_freem(nmrest
->nmc_mhead
);
3863 nmrest
->nmc_mhead
= NULL
;
3869 bzero(req
, sizeof(*req
));
3870 if (req
== newreq
) {
3871 req
->r_flags
= R_ALLOCATED
;
3874 nmp
= VFSTONFS(np
? NFSTOMP(np
) : mp
);
3875 if (nfs_mount_gone(nmp
)) {
3877 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3881 lck_mtx_lock(&nmp
->nm_lock
);
3882 if ((nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) &&
3883 (nmp
->nm_state
& NFSSTA_TIMEO
)) {
3884 lck_mtx_unlock(&nmp
->nm_lock
);
3885 mbuf_freem(nmrest
->nmc_mhead
);
3886 nmrest
->nmc_mhead
= NULL
;
3888 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3893 if ((nmp
->nm_vers
!= NFS_VER4
) && (procnum
>= 0) && (procnum
< NFS_NPROCS
)) {
3894 OSAddAtomic64(1, &nfsstats
.rpccnt
[procnum
]);
3896 if ((nmp
->nm_vers
== NFS_VER4
) && (procnum
!= NFSPROC4_COMPOUND
) && (procnum
!= NFSPROC4_NULL
)) {
3897 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum
);
3900 lck_mtx_init(&req
->r_mtx
, nfs_request_grp
, LCK_ATTR_NULL
);
3904 req
->r_thread
= thd
;
3906 req
->r_flags
|= R_NOINTR
;
3908 if (IS_VALID_CRED(cred
)) {
3909 kauth_cred_ref(cred
);
3912 req
->r_procnum
= procnum
;
3913 if (proct
[procnum
] > 0) {
3914 req
->r_flags
|= R_TIMING
;
3916 req
->r_nmrep
.nmc_mhead
= NULL
;
3917 SLIST_INIT(&req
->r_gss_seqlist
);
3918 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3919 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3920 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3922 /* set auth flavor to use for request */
3924 req
->r_auth
= RPCAUTH_NONE
;
3925 } else if (req
->r_np
&& (req
->r_np
->n_auth
!= RPCAUTH_INVALID
)) {
3926 req
->r_auth
= req
->r_np
->n_auth
;
3928 req
->r_auth
= nmp
->nm_auth
;
3931 lck_mtx_unlock(&nmp
->nm_lock
);
3933 /* move the request mbuf chain to the nfsreq */
3934 req
->r_mrest
= nmrest
->nmc_mhead
;
3935 nmrest
->nmc_mhead
= NULL
;
3937 req
->r_flags
|= R_INITTED
;
3946 * Clean up and free an NFS request structure.
3949 nfs_request_destroy(struct nfsreq
*req
)
3951 struct nfsmount
*nmp
;
3952 struct gss_seq
*gsp
, *ngsp
;
3953 int clearjbtimeo
= 0;
3955 if (!req
|| !(req
->r_flags
& R_INITTED
)) {
3959 req
->r_flags
&= ~R_INITTED
;
3960 if (req
->r_lflags
& RL_QUEUED
) {
3961 nfs_reqdequeue(req
);
3964 if (req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
3966 * Still on an async I/O queue?
3967 * %%% But which one, we may be on a local iod.
3969 lck_mtx_lock(nfsiod_mutex
);
3970 if (nmp
&& req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
3971 TAILQ_REMOVE(&nmp
->nm_iodq
, req
, r_achain
);
3972 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3974 lck_mtx_unlock(nfsiod_mutex
);
3977 lck_mtx_lock(&req
->r_mtx
);
3979 lck_mtx_lock(&nmp
->nm_lock
);
3980 if (req
->r_flags
& R_CWND
) {
3981 /* Decrement the outstanding request count. */
3982 req
->r_flags
&= ~R_CWND
;
3983 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3984 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3985 /* congestion window is open, poke the cwnd queue */
3986 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3987 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3988 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3992 assert((req
->r_flags
& R_RESENDQ
) == 0);
3993 /* XXX should we just remove this conditional, we should have a reference if we're resending */
3994 if (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
) {
3995 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
3996 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3997 if (req
->r_flags
& R_RESENDQ
) {
3998 req
->r_flags
&= ~R_RESENDQ
;
4001 if (req
->r_cchain
.tqe_next
!= NFSREQNOLIST
) {
4002 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
4003 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4005 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4006 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4008 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4010 lck_mtx_unlock(&nmp
->nm_lock
);
4012 lck_mtx_unlock(&req
->r_mtx
);
4015 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4018 mbuf_freem(req
->r_mhead
);
4019 } else if (req
->r_mrest
) {
4020 mbuf_freem(req
->r_mrest
);
4022 if (req
->r_nmrep
.nmc_mhead
) {
4023 mbuf_freem(req
->r_nmrep
.nmc_mhead
);
4025 if (IS_VALID_CRED(req
->r_cred
)) {
4026 kauth_cred_unref(&req
->r_cred
);
4028 if (nfs_request_using_gss(req
)) {
4029 nfs_gss_clnt_rpcdone(req
);
4031 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
)
4033 if (req
->r_gss_ctx
) {
4034 nfs_gss_clnt_ctx_unref(req
);
4036 if (req
->r_wrongsec
) {
4037 FREE(req
->r_wrongsec
, M_TEMP
);
4040 nfs_mount_rele(nmp
);
4042 lck_mtx_destroy(&req
->r_mtx
, nfs_request_grp
);
4043 if (req
->r_flags
& R_ALLOCATED
) {
4044 FREE_ZONE(req
, sizeof(*req
), M_NFSREQ
);
4049 nfs_request_ref(struct nfsreq
*req
, int locked
)
4052 lck_mtx_lock(&req
->r_mtx
);
4054 if (req
->r_refs
<= 0) {
4055 panic("nfsreq reference error");
4059 lck_mtx_unlock(&req
->r_mtx
);
4064 nfs_request_rele(struct nfsreq
*req
)
4068 lck_mtx_lock(&req
->r_mtx
);
4069 if (req
->r_refs
<= 0) {
4070 panic("nfsreq reference underflow");
4073 destroy
= (req
->r_refs
== 0);
4074 lck_mtx_unlock(&req
->r_mtx
);
4076 nfs_request_destroy(req
);
4082 * Add an (updated) RPC header with authorization to an NFS request.
4085 nfs_request_add_header(struct nfsreq
*req
)
4087 struct nfsmount
*nmp
;
4091 /* free up any previous header */
4092 if ((m
= req
->r_mhead
)) {
4093 while (m
&& (m
!= req
->r_mrest
)) {
4096 req
->r_mhead
= NULL
;
4100 if (nfs_mount_gone(nmp
)) {
4104 error
= nfsm_rpchead(req
, req
->r_mrest
, &req
->r_xid
, &req
->r_mhead
);
4109 req
->r_mreqlen
= mbuf_pkthdr_len(req
->r_mhead
);
4111 if (nfs_mount_gone(nmp
)) {
4114 lck_mtx_lock(&nmp
->nm_lock
);
4115 if (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) {
4116 req
->r_retry
= nmp
->nm_retry
;
4118 req
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
4120 lck_mtx_unlock(&nmp
->nm_lock
);
4127 * Queue an NFS request up and send it out.
4130 nfs_request_send(struct nfsreq
*req
, int wait
)
4132 struct nfsmount
*nmp
;
4135 lck_mtx_lock(&req
->r_mtx
);
4136 req
->r_flags
|= R_SENDING
;
4137 lck_mtx_unlock(&req
->r_mtx
);
4139 lck_mtx_lock(nfs_request_mutex
);
4142 if (nfs_mount_gone(nmp
)) {
4143 lck_mtx_unlock(nfs_request_mutex
);
4148 if (!req
->r_start
) {
4149 req
->r_start
= now
.tv_sec
;
4150 req
->r_lastmsg
= now
.tv_sec
-
4151 ((nmp
->nm_tprintf_delay
) - (nmp
->nm_tprintf_initial_delay
));
4154 OSAddAtomic64(1, &nfsstats
.rpcrequests
);
4157 * Chain request into list of outstanding requests. Be sure
4158 * to put it LAST so timer finds oldest requests first.
4159 * Make sure that the request queue timer is running
4160 * to check for possible request timeout.
4162 TAILQ_INSERT_TAIL(&nfs_reqq
, req
, r_chain
);
4163 req
->r_lflags
|= RL_QUEUED
;
4164 if (!nfs_request_timer_on
) {
4165 nfs_request_timer_on
= 1;
4166 nfs_interval_timer_start(nfs_request_timer_call
,
4169 lck_mtx_unlock(nfs_request_mutex
);
4171 /* Send the request... */
4172 return nfs_send(req
, wait
);
4176 * Call nfs_wait_reply() to wait for the reply.
4179 nfs_request_wait(struct nfsreq
*req
)
4181 req
->r_error
= nfs_wait_reply(req
);
4185 * Finish up an NFS request by dequeueing it and
4186 * doing the initial NFS request reply processing.
4191 struct nfsm_chain
*nmrepp
,
4194 struct nfsmount
*nmp
;
4197 uint32_t verf_len
= 0;
4198 uint32_t reply_status
= 0;
4199 uint32_t rejected_status
= 0;
4200 uint32_t auth_status
= 0;
4201 uint32_t accepted_status
= 0;
4202 struct nfsm_chain nmrep
;
4203 int error
, clearjbtimeo
;
4205 error
= req
->r_error
;
4208 nmrepp
->nmc_mhead
= NULL
;
4211 /* RPC done, unlink the request. */
4212 nfs_reqdequeue(req
);
4214 mrep
= req
->r_nmrep
.nmc_mhead
;
4218 if ((req
->r_flags
& R_CWND
) && nmp
) {
4220 * Decrement the outstanding request count.
4222 req
->r_flags
&= ~R_CWND
;
4223 lck_mtx_lock(&nmp
->nm_lock
);
4224 FSDBG(273, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4225 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4226 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4227 /* congestion window is open, poke the cwnd queue */
4228 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4229 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4230 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4233 lck_mtx_unlock(&nmp
->nm_lock
);
4236 if (nfs_request_using_gss(req
)) {
4238 * If the request used an RPCSEC_GSS credential
4239 * then reset its sequence number bit in the
4242 nfs_gss_clnt_rpcdone(req
);
4245 * If we need to re-send, go back and re-build the
4246 * request based on a new sequence number.
4247 * Note that we're using the original XID.
4249 if (error
== EAGAIN
) {
4254 error
= nfs_gss_clnt_args_restore(req
); // remove any trailer mbufs
4255 req
->r_nmrep
.nmc_mhead
= NULL
;
4256 req
->r_flags
|= R_RESTART
;
4257 if (error
== ENEEDAUTH
) {
4258 req
->r_xid
= 0; // get a new XID
4266 * If there was a successful reply, make sure to mark the mount as up.
4267 * If a tprintf message was given (or if this is a timed-out soft mount)
4268 * then post a tprintf message indicating the server is alive again.
4271 if ((req
->r_flags
& R_TPRINTFMSG
) ||
4272 (nmp
&& (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) &&
4273 ((nmp
->nm_state
& (NFSSTA_TIMEO
| NFSSTA_FORCE
| NFSSTA_DEAD
)) == NFSSTA_TIMEO
))) {
4274 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, "is alive again");
4276 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, NULL
);
4279 if (!error
&& !nmp
) {
4285 * break down the RPC header and check if ok
4287 nmrep
= req
->r_nmrep
;
4288 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
4290 if (reply_status
== RPC_MSGDENIED
) {
4291 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
4293 if (rejected_status
== RPC_MISMATCH
) {
4297 nfsm_chain_get_32(error
, &nmrep
, auth_status
);
4299 switch (auth_status
) {
4300 case RPCSEC_GSS_CREDPROBLEM
:
4301 case RPCSEC_GSS_CTXPROBLEM
:
4303 * An RPCSEC_GSS cred or context problem.
4304 * We can't use it anymore.
4305 * Restore the args, renew the context
4306 * and set up for a resend.
4308 error
= nfs_gss_clnt_args_restore(req
);
4309 if (error
&& error
!= ENEEDAUTH
) {
4314 error
= nfs_gss_clnt_ctx_renew(req
);
4320 req
->r_nmrep
.nmc_mhead
= NULL
;
4321 req
->r_xid
= 0; // get a new XID
4322 req
->r_flags
|= R_RESTART
;
4331 /* Now check the verifier */
4332 nfsm_chain_get_32(error
, &nmrep
, verf_type
); // verifier flavor
4333 nfsm_chain_get_32(error
, &nmrep
, verf_len
); // verifier length
4336 switch (req
->r_auth
) {
4339 /* Any AUTH_SYS verifier is ignored */
4341 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
4343 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
4348 error
= nfs_gss_clnt_verf_get(req
, &nmrep
,
4349 verf_type
, verf_len
, &accepted_status
);
4354 switch (accepted_status
) {
4356 if (req
->r_procnum
== NFSPROC_NULL
) {
4358 * The NFS null procedure is unique,
4359 * in not returning an NFS status.
4363 nfsm_chain_get_32(error
, &nmrep
, *status
);
4367 if ((nmp
->nm_vers
!= NFS_VER2
) && (*status
== NFSERR_TRYLATER
)) {
4369 * It's a JUKEBOX error - delay and try again
4371 int delay
, slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4374 req
->r_nmrep
.nmc_mhead
= NULL
;
4375 if ((req
->r_delay
>= 30) && !(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4376 /* we're not yet completely mounted and */
4377 /* we can't complete an RPC, so we fail */
4378 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4380 error
= req
->r_error
;
4383 req
->r_delay
= !req
->r_delay
? NFS_TRYLATERDEL
: (req
->r_delay
* 2);
4384 if (req
->r_delay
> 30) {
4387 if (nmp
->nm_tprintf_initial_delay
&& (req
->r_delay
>= nmp
->nm_tprintf_initial_delay
)) {
4388 if (!(req
->r_flags
& R_JBTPRINTFMSG
)) {
4389 req
->r_flags
|= R_JBTPRINTFMSG
;
4390 lck_mtx_lock(&nmp
->nm_lock
);
4392 lck_mtx_unlock(&nmp
->nm_lock
);
4394 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_JUKEBOXTIMEO
,
4395 "resource temporarily unavailable (jukebox)", 0);
4397 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (req
->r_delay
== 30) &&
4398 !(req
->r_flags
& R_NOINTR
)) {
4399 /* for soft mounts, just give up after a short while */
4400 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4402 error
= req
->r_error
;
4405 delay
= req
->r_delay
;
4406 if (req
->r_callback
.rcb_func
) {
4409 req
->r_resendtime
= now
.tv_sec
+ delay
;
4412 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
4415 tsleep(nfs_request_finish
, PSOCK
| slpflag
, "nfs_jukebox_trylater", hz
);
4417 } while (--delay
> 0);
4419 req
->r_xid
= 0; // get a new XID
4420 req
->r_flags
|= R_RESTART
;
4422 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_TRYLATER
);
4426 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4427 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4428 lck_mtx_lock(&nmp
->nm_lock
);
4430 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4431 lck_mtx_unlock(&nmp
->nm_lock
);
4432 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, "resource available again");
4435 if ((nmp
->nm_vers
>= NFS_VER4
) && (*status
== NFSERR_WRONGSEC
)) {
4437 * Hmmm... we need to try a different security flavor.
4438 * The first time a request hits this, we will allocate an array
4439 * to track flavors to try. We fill the array with the mount's
4440 * preferred flavors or the server's preferred flavors or just the
4441 * flavors we support.
4443 uint32_t srvflavors
[NX_MAX_SEC_FLAVORS
];
4446 /* Call SECINFO to try to get list of flavors from server. */
4447 srvcount
= NX_MAX_SEC_FLAVORS
;
4448 nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, req
->r_cred
, srvflavors
, &srvcount
);
4450 if (!req
->r_wrongsec
) {
4451 /* first time... set up flavor array */
4452 MALLOC(req
->r_wrongsec
, uint32_t*, NX_MAX_SEC_FLAVORS
* sizeof(uint32_t), M_TEMP
, M_WAITOK
);
4453 if (!req
->r_wrongsec
) {
4458 if (nmp
->nm_sec
.count
) { /* use the mount's preferred list of flavors */
4459 for (; i
< nmp
->nm_sec
.count
; i
++) {
4460 req
->r_wrongsec
[i
] = nmp
->nm_sec
.flavors
[i
];
4462 } else if (srvcount
) { /* otherwise use the server's list of flavors */
4463 for (; i
< srvcount
; i
++) {
4464 req
->r_wrongsec
[i
] = srvflavors
[i
];
4466 } else { /* otherwise, just try the flavors we support. */
4467 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5P
;
4468 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5I
;
4469 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5
;
4470 req
->r_wrongsec
[i
++] = RPCAUTH_SYS
;
4471 req
->r_wrongsec
[i
++] = RPCAUTH_NONE
;
4473 for (; i
< NX_MAX_SEC_FLAVORS
; i
++) { /* invalidate any remaining slots */
4474 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4478 /* clear the current flavor from the list */
4479 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4480 if (req
->r_wrongsec
[i
] == req
->r_auth
) {
4481 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4485 /* find the next flavor to try */
4486 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4487 if (req
->r_wrongsec
[i
] != RPCAUTH_INVALID
) {
4488 if (!srvcount
) { /* no server list, just try it */
4491 /* check that it's in the server's list */
4492 for (j
= 0; j
< srvcount
; j
++) {
4493 if (req
->r_wrongsec
[i
] == srvflavors
[j
]) {
4497 if (j
< srvcount
) { /* found */
4500 /* not found in server list */
4501 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4504 if (i
== NX_MAX_SEC_FLAVORS
) {
4505 /* nothing left to try! */
4510 /* retry with the next auth flavor */
4511 req
->r_auth
= req
->r_wrongsec
[i
];
4512 req
->r_xid
= 0; // get a new XID
4513 req
->r_flags
|= R_RESTART
;
4515 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_WRONGSEC
);
4518 if ((nmp
->nm_vers
>= NFS_VER4
) && req
->r_wrongsec
) {
4520 * We renegotiated security for this request; so update the
4521 * default security flavor for the associated node.
4524 req
->r_np
->n_auth
= req
->r_auth
;
4528 if (*status
== NFS_OK
) {
4530 * Successful NFS request
4533 req
->r_nmrep
.nmc_mhead
= NULL
;
4536 /* Got an NFS error of some kind */
4539 * If the File Handle was stale, invalidate the
4540 * lookup cache, just in case.
4542 if ((*status
== ESTALE
) && req
->r_np
) {
4543 cache_purge(NFSTOV(req
->r_np
));
4544 /* if monitored, also send delete event */
4545 if (vnode_ismonitored(NFSTOV(req
->r_np
))) {
4546 nfs_vnode_notify(req
->r_np
, (VNODE_EVENT_ATTRIB
| VNODE_EVENT_DELETE
));
4549 if (nmp
->nm_vers
== NFS_VER2
) {
4554 req
->r_nmrep
.nmc_mhead
= NULL
;
4557 case RPC_PROGUNAVAIL
:
4558 error
= EPROGUNAVAIL
;
4560 case RPC_PROGMISMATCH
:
4561 error
= ERPCMISMATCH
;
4563 case RPC_PROCUNAVAIL
:
4564 error
= EPROCUNAVAIL
;
4569 case RPC_SYSTEM_ERR
:
4575 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4576 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4577 lck_mtx_lock(&nmp
->nm_lock
);
4579 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4580 lck_mtx_unlock(&nmp
->nm_lock
);
4582 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4585 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
,
4586 (!error
&& (*status
== NFS_OK
)) ? 0xf0f0f0f0 : error
);
4591 * NFS request using a GSS/Kerberos security flavor?
4594 nfs_request_using_gss(struct nfsreq
*req
)
4596 if (!req
->r_gss_ctx
) {
4599 switch (req
->r_auth
) {
4609 * Perform an NFS request synchronously.
4615 mount_t mp
, /* used only if !np */
4616 struct nfsm_chain
*nmrest
,
4619 struct nfsreq_secinfo_args
*si
,
4620 struct nfsm_chain
*nmrepp
,
4624 return nfs_request2(np
, mp
, nmrest
, procnum
,
4625 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4626 si
, 0, nmrepp
, xidp
, status
);
4632 mount_t mp
, /* used only if !np */
4633 struct nfsm_chain
*nmrest
,
4637 struct nfsreq_secinfo_args
*si
,
4639 struct nfsm_chain
*nmrepp
,
4643 struct nfsreq rq
, *req
= &rq
;
4646 if ((error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, &req
))) {
4649 req
->r_flags
|= (flags
& (R_OPTMASK
| R_SOFT
));
4651 req
->r_secinfo
= *si
;
4654 FSDBG_TOP(273, R_XID32(req
->r_xid
), np
, procnum
, 0);
4657 req
->r_flags
&= ~R_RESTART
;
4658 if ((error
= nfs_request_add_header(req
))) {
4664 if ((error
= nfs_request_send(req
, 1))) {
4667 nfs_request_wait(req
);
4668 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4671 } while (req
->r_flags
& R_RESTART
);
4673 FSDBG_BOT(273, R_XID32(req
->r_xid
), np
, procnum
, error
);
4674 nfs_request_rele(req
);
4680 * Set up a new null proc request to exchange GSS context tokens with the
4681 * server. Associate the context that we are setting up with the request that we
4688 struct nfsm_chain
*nmrest
,
4692 struct nfs_gss_clnt_ctx
*cp
, /* Set to gss context to renew or setup */
4693 struct nfsm_chain
*nmrepp
,
4696 struct nfsreq rq
, *req
= &rq
;
4697 int error
, wait
= 1;
4699 if ((error
= nfs_request_create(NULL
, mp
, nmrest
, NFSPROC_NULL
, thd
, cred
, &req
))) {
4702 req
->r_flags
|= (flags
& R_OPTMASK
);
4705 printf("nfs_request_gss request has no context\n");
4706 nfs_request_rele(req
);
4707 return NFSERR_EAUTH
;
4709 nfs_gss_clnt_ctx_ref(req
, cp
);
4712 * Don't wait for a reply to a context destroy advisory
4713 * to avoid hanging on a dead server.
4715 if (cp
->gss_clnt_proc
== RPCSEC_GSS_DESTROY
) {
4719 FSDBG_TOP(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, 0);
4722 req
->r_flags
&= ~R_RESTART
;
4723 if ((error
= nfs_request_add_header(req
))) {
4727 if ((error
= nfs_request_send(req
, wait
))) {
4734 nfs_request_wait(req
);
4735 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4738 } while (req
->r_flags
& R_RESTART
);
4740 FSDBG_BOT(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, error
);
4742 nfs_gss_clnt_ctx_unref(req
);
4743 nfs_request_rele(req
);
4749 * Create and start an asynchronous NFS request.
4754 mount_t mp
, /* used only if !np */
4755 struct nfsm_chain
*nmrest
,
4759 struct nfsreq_secinfo_args
*si
,
4761 struct nfsreq_cbinfo
*cb
,
4762 struct nfsreq
**reqp
)
4765 struct nfsmount
*nmp
;
4768 error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, reqp
);
4770 FSDBG(274, (req
? R_XID32(req
->r_xid
) : 0), np
, procnum
, error
);
4774 req
->r_flags
|= (flags
& R_OPTMASK
);
4775 req
->r_flags
|= R_ASYNC
;
4777 req
->r_secinfo
= *si
;
4780 req
->r_callback
= *cb
;
4782 error
= nfs_request_add_header(req
);
4784 req
->r_flags
|= R_WAITSENT
;
4785 if (req
->r_callback
.rcb_func
) {
4786 nfs_request_ref(req
, 0);
4788 error
= nfs_request_send(req
, 1);
4789 lck_mtx_lock(&req
->r_mtx
);
4790 if (!error
&& !(req
->r_flags
& R_SENT
) && req
->r_callback
.rcb_func
) {
4791 /* make sure to wait until this async I/O request gets sent */
4792 int slpflag
= (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4793 struct timespec ts
= { 2, 0 };
4794 while (!(req
->r_flags
& R_SENT
)) {
4796 if ((req
->r_flags
& R_RESENDQ
) && !nfs_mount_gone(nmp
)) {
4797 lck_mtx_lock(&nmp
->nm_lock
);
4798 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4800 * It's not going to get off the resend queue if we're in recovery.
4801 * So, just take it off ourselves. We could be holding mount state
4802 * busy and thus holding up the start of recovery.
4804 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4805 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4806 if (req
->r_flags
& R_RESENDQ
) {
4807 req
->r_flags
&= ~R_RESENDQ
;
4809 lck_mtx_unlock(&nmp
->nm_lock
);
4810 req
->r_flags
|= R_SENDING
;
4811 lck_mtx_unlock(&req
->r_mtx
);
4812 error
= nfs_send(req
, 1);
4813 /* Remove the R_RESENDQ reference */
4814 nfs_request_rele(req
);
4815 lck_mtx_lock(&req
->r_mtx
);
4821 lck_mtx_unlock(&nmp
->nm_lock
);
4823 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
4826 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitsent", &ts
);
4830 sent
= req
->r_flags
& R_SENT
;
4831 lck_mtx_unlock(&req
->r_mtx
);
4832 if (error
&& req
->r_callback
.rcb_func
&& !sent
) {
4833 nfs_request_rele(req
);
4836 FSDBG(274, R_XID32(req
->r_xid
), np
, procnum
, error
);
4837 if (error
|| req
->r_callback
.rcb_func
) {
4838 nfs_request_rele(req
);
4845 * Wait for and finish an asynchronous NFS request.
4848 nfs_request_async_finish(
4850 struct nfsm_chain
*nmrepp
,
4854 int error
= 0, asyncio
= req
->r_callback
.rcb_func
? 1 : 0;
4855 struct nfsmount
*nmp
;
4857 lck_mtx_lock(&req
->r_mtx
);
4859 req
->r_flags
|= R_ASYNCWAIT
;
4861 while (req
->r_flags
& R_RESENDQ
) { /* wait until the request is off the resend queue */
4862 struct timespec ts
= { 2, 0 };
4864 if ((nmp
= req
->r_nmp
)) {
4865 lck_mtx_lock(&nmp
->nm_lock
);
4866 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4868 * It's not going to get off the resend queue if we're in recovery.
4869 * So, just take it off ourselves. We could be holding mount state
4870 * busy and thus holding up the start of recovery.
4872 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4873 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4874 if (req
->r_flags
& R_RESENDQ
) {
4875 req
->r_flags
&= ~R_RESENDQ
;
4877 /* Remove the R_RESENDQ reference */
4878 assert(req
->r_refs
> 0);
4880 lck_mtx_unlock(&nmp
->nm_lock
);
4883 lck_mtx_unlock(&nmp
->nm_lock
);
4885 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
4888 msleep(req
, &req
->r_mtx
, PZERO
- 1, "nfsresendqwait", &ts
);
4890 lck_mtx_unlock(&req
->r_mtx
);
4893 nfs_request_wait(req
);
4894 error
= nfs_request_finish(req
, nmrepp
, status
);
4897 while (!error
&& (req
->r_flags
& R_RESTART
)) {
4899 assert(req
->r_achain
.tqe_next
== NFSREQNOLIST
);
4900 lck_mtx_lock(&req
->r_mtx
);
4901 req
->r_flags
&= ~R_IOD
;
4902 if (req
->r_resendtime
) { /* send later */
4903 nfs_asyncio_resend(req
);
4904 lck_mtx_unlock(&req
->r_mtx
);
4907 lck_mtx_unlock(&req
->r_mtx
);
4910 req
->r_flags
&= ~R_RESTART
;
4911 if ((error
= nfs_request_add_header(req
))) {
4914 if ((error
= nfs_request_send(req
, !asyncio
))) {
4920 nfs_request_wait(req
);
4921 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4929 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, error
);
4930 nfs_request_rele(req
);
4935 * Cancel a pending asynchronous NFS request.
4938 nfs_request_async_cancel(struct nfsreq
*req
)
4940 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, 0xD1ED1E);
4941 nfs_request_rele(req
);
4945 * Flag a request as being terminated.
4948 nfs_softterm(struct nfsreq
*req
)
4950 struct nfsmount
*nmp
= req
->r_nmp
;
4951 req
->r_flags
|= R_SOFTTERM
;
4952 req
->r_error
= ETIMEDOUT
;
4953 if (!(req
->r_flags
& R_CWND
) || nfs_mount_gone(nmp
)) {
4956 /* update congestion window */
4957 req
->r_flags
&= ~R_CWND
;
4958 lck_mtx_lock(&nmp
->nm_lock
);
4959 FSDBG(532, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4960 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4961 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4962 /* congestion window is open, poke the cwnd queue */
4963 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4964 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4965 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4968 lck_mtx_unlock(&nmp
->nm_lock
);
4972 * Ensure req isn't in use by the timer, then dequeue it.
4975 nfs_reqdequeue(struct nfsreq
*req
)
4977 lck_mtx_lock(nfs_request_mutex
);
4978 while (req
->r_lflags
& RL_BUSY
) {
4979 req
->r_lflags
|= RL_WAITING
;
4980 msleep(&req
->r_lflags
, nfs_request_mutex
, PSOCK
, "reqdeq", NULL
);
4982 if (req
->r_lflags
& RL_QUEUED
) {
4983 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
4984 req
->r_lflags
&= ~RL_QUEUED
;
4986 lck_mtx_unlock(nfs_request_mutex
);
4990 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
4991 * free()'d out from under it.
4994 nfs_reqbusy(struct nfsreq
*req
)
4996 if (req
->r_lflags
& RL_BUSY
) {
4997 panic("req locked");
4999 req
->r_lflags
|= RL_BUSY
;
5003 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5006 nfs_reqnext(struct nfsreq
*req
)
5008 struct nfsreq
* nextreq
;
5014 * We need to get and busy the next req before signalling the
5015 * current one, otherwise wakeup() may block us and we'll race to
5016 * grab the next req.
5018 nextreq
= TAILQ_NEXT(req
, r_chain
);
5019 if (nextreq
!= NULL
) {
5020 nfs_reqbusy(nextreq
);
5022 /* unbusy and signal. */
5023 req
->r_lflags
&= ~RL_BUSY
;
5024 if (req
->r_lflags
& RL_WAITING
) {
5025 req
->r_lflags
&= ~RL_WAITING
;
5026 wakeup(&req
->r_lflags
);
5032 * NFS request queue timer routine
5034 * Scan the NFS request queue for any requests that have timed out.
5036 * Alert the system of unresponsive servers.
5037 * Mark expired requests on soft mounts as terminated.
5038 * For UDP, mark/signal requests for retransmission.
5041 nfs_request_timer(__unused
void *param0
, __unused
void *param1
)
5044 struct nfsmount
*nmp
;
5045 int timeo
, maxtime
, finish_asyncio
, error
;
5047 TAILQ_HEAD(nfs_mount_pokeq
, nfsmount
) nfs_mount_poke_queue
;
5048 TAILQ_INIT(&nfs_mount_poke_queue
);
5051 lck_mtx_lock(nfs_request_mutex
);
5052 req
= TAILQ_FIRST(&nfs_reqq
);
5053 if (req
== NULL
) { /* no requests - turn timer off */
5054 nfs_request_timer_on
= 0;
5055 lck_mtx_unlock(nfs_request_mutex
);
5062 for (; req
!= NULL
; req
= nfs_reqnext(req
)) {
5065 NFS_SOCK_DBG("Found a request with out a mount!\n");
5068 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
5071 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0))) {
5072 if (req
->r_callback
.rcb_func
!= NULL
) {
5073 /* async I/O RPC needs to be finished */
5074 lck_mtx_lock(&req
->r_mtx
);
5075 req
->r_error
= error
;
5076 finish_asyncio
= !(req
->r_flags
& R_WAITSENT
);
5078 lck_mtx_unlock(&req
->r_mtx
);
5079 if (finish_asyncio
) {
5080 nfs_asyncio_finish(req
);
5086 lck_mtx_lock(&req
->r_mtx
);
5088 if (nmp
->nm_tprintf_initial_delay
&&
5089 ((req
->r_rexmit
> 2) || (req
->r_flags
& R_RESENDERR
)) &&
5090 ((req
->r_lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
)) {
5091 req
->r_lastmsg
= now
.tv_sec
;
5092 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5093 "not responding", 1);
5094 req
->r_flags
|= R_TPRINTFMSG
;
5095 lck_mtx_lock(&nmp
->nm_lock
);
5096 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
5097 lck_mtx_unlock(&nmp
->nm_lock
);
5098 /* we're not yet completely mounted and */
5099 /* we can't complete an RPC, so we fail */
5100 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5102 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5104 lck_mtx_unlock(&req
->r_mtx
);
5105 if (finish_asyncio
) {
5106 nfs_asyncio_finish(req
);
5110 lck_mtx_unlock(&nmp
->nm_lock
);
5114 * Put a reasonable limit on the maximum timeout,
5115 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5117 if (!(NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && !nfs_can_squish(nmp
)) {
5118 maxtime
= NFS_MAXTIMEO
;
5119 } else if ((req
->r_flags
& (R_SETUP
| R_RECOVER
)) ||
5120 ((nmp
->nm_reconnect_start
<= 0) || ((now
.tv_sec
- nmp
->nm_reconnect_start
) < 8))) {
5121 maxtime
= (NFS_MAXTIMEO
/ (nmp
->nm_timeouts
+ 1)) / 2;
5123 maxtime
= NFS_MINTIMEO
/ 4;
5127 * Check for request timeout.
5129 if (req
->r_rtt
>= 0) {
5131 lck_mtx_lock(&nmp
->nm_lock
);
5132 if (req
->r_flags
& R_RESENDERR
) {
5133 /* with resend errors, retry every few seconds */
5136 if (req
->r_procnum
== NFSPROC_NULL
&& req
->r_gss_ctx
!= NULL
) {
5137 timeo
= NFS_MINIDEMTIMEO
; // gss context setup
5138 } else if (NMFLAG(nmp
, DUMBTIMER
)) {
5139 timeo
= nmp
->nm_timeo
;
5141 timeo
= NFS_RTO(nmp
, proct
[req
->r_procnum
]);
5144 /* ensure 62.5 ms floor */
5145 while (16 * timeo
< hz
) {
5148 if (nmp
->nm_timeouts
> 0) {
5149 timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
5152 /* limit timeout to max */
5153 if (timeo
> maxtime
) {
5156 if (req
->r_rtt
<= timeo
) {
5157 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req
->r_rtt
, timeo
);
5158 lck_mtx_unlock(&nmp
->nm_lock
);
5159 lck_mtx_unlock(&req
->r_mtx
);
5162 /* The request has timed out */
5163 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5164 req
->r_procnum
, proct
[req
->r_procnum
],
5165 req
->r_xid
, req
->r_rtt
, timeo
, nmp
->nm_timeouts
,
5166 (now
.tv_sec
- req
->r_start
) * NFS_HZ
, maxtime
);
5167 if (nmp
->nm_timeouts
< 8) {
5170 if (nfs_mount_check_dead_timeout(nmp
)) {
5171 /* Unbusy this request */
5172 req
->r_lflags
&= ~RL_BUSY
;
5173 if (req
->r_lflags
& RL_WAITING
) {
5174 req
->r_lflags
&= ~RL_WAITING
;
5175 wakeup(&req
->r_lflags
);
5177 lck_mtx_unlock(&req
->r_mtx
);
5179 /* No need to poke this mount */
5180 if (nmp
->nm_sockflags
& NMSOCK_POKE
) {
5181 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
5182 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5184 /* Release our lock state, so we can become a zombie */
5185 lck_mtx_unlock(nfs_request_mutex
);
5188 * Note nfs_mount_make zombie(nmp) must be
5189 * called with nm_lock held. After doing some
5190 * work we release nm_lock in
5191 * nfs_make_mount_zombie with out acquiring any
5192 * other locks. (Later, in nfs_mount_zombie we
5193 * will acquire nfs_request_mutex, r_mtx,
5194 * nm_lock in that order). So we should not be
5195 * introducing deadlock here. We take a reference
5196 * on the mount so that its still there when we
5200 nfs_mount_make_zombie(nmp
);
5201 lck_mtx_unlock(&nmp
->nm_lock
);
5202 nfs_mount_rele(nmp
);
5205 * All the request for this mount have now been
5206 * removed from the request queue. Restart to
5207 * process the remaining mounts
5212 /* if it's been a few seconds, try poking the socket */
5213 if ((nmp
->nm_sotype
== SOCK_STREAM
) &&
5214 ((now
.tv_sec
- req
->r_start
) >= 3) &&
5215 !(nmp
->nm_sockflags
& (NMSOCK_POKE
| NMSOCK_UNMOUNT
)) &&
5216 (nmp
->nm_sockflags
& NMSOCK_READY
)) {
5217 nmp
->nm_sockflags
|= NMSOCK_POKE
;
5219 * We take a ref on the mount so that we know the mount will still be there
5220 * when we process the nfs_mount_poke_queue. An unmount request will block
5221 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5222 * the reference after calling nfs_sock_poke below;
5225 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5227 lck_mtx_unlock(&nmp
->nm_lock
);
5230 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5231 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& (R_SETUP
| R_RECOVER
| R_SOFT
))) &&
5232 ((req
->r_rexmit
>= req
->r_retry
) || /* too many */
5233 ((now
.tv_sec
- req
->r_start
) * NFS_HZ
> maxtime
))) { /* too long */
5234 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5235 lck_mtx_lock(&nmp
->nm_lock
);
5236 if (!(nmp
->nm_state
& NFSSTA_TIMEO
)) {
5237 lck_mtx_unlock(&nmp
->nm_lock
);
5238 /* make sure we note the unresponsive server */
5239 /* (maxtime may be less than tprintf delay) */
5240 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5241 "not responding", 1);
5242 req
->r_lastmsg
= now
.tv_sec
;
5243 req
->r_flags
|= R_TPRINTFMSG
;
5245 lck_mtx_unlock(&nmp
->nm_lock
);
5247 if (req
->r_flags
& R_NOINTR
) {
5248 /* don't terminate nointr requests on timeout */
5249 lck_mtx_unlock(&req
->r_mtx
);
5252 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5253 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
,
5254 now
.tv_sec
- req
->r_start
);
5256 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5258 lck_mtx_unlock(&req
->r_mtx
);
5259 if (finish_asyncio
) {
5260 nfs_asyncio_finish(req
);
5265 /* for TCP, only resend if explicitly requested */
5266 if ((nmp
->nm_sotype
== SOCK_STREAM
) && !(req
->r_flags
& R_MUSTRESEND
)) {
5267 if (++req
->r_rexmit
> NFS_MAXREXMIT
) {
5268 req
->r_rexmit
= NFS_MAXREXMIT
;
5271 lck_mtx_unlock(&req
->r_mtx
);
5276 * The request needs to be (re)sent. Kick the requester to resend it.
5277 * (unless it's already marked as needing a resend)
5279 if ((req
->r_flags
& R_MUSTRESEND
) && (req
->r_rtt
== -1)) {
5280 lck_mtx_unlock(&req
->r_mtx
);
5283 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5284 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
5285 req
->r_flags
|= R_MUSTRESEND
;
5288 if ((req
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
5289 nfs_asyncio_resend(req
);
5291 lck_mtx_unlock(&req
->r_mtx
);
5294 lck_mtx_unlock(nfs_request_mutex
);
5296 /* poke any sockets */
5297 while ((nmp
= TAILQ_FIRST(&nfs_mount_poke_queue
))) {
5298 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5300 nfs_mount_rele(nmp
);
5303 nfs_interval_timer_start(nfs_request_timer_call
, NFS_REQUESTDELAY
);
5307 * check a thread's proc for the "noremotehang" flag.
5310 nfs_noremotehang(thread_t thd
)
5312 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5313 return p
&& proc_noremotehang(p
);
5317 * Test for a termination condition pending on the process.
5318 * This is used to determine if we need to bail on a mount.
5319 * ETIMEDOUT is returned if there has been a soft timeout.
5320 * EINTR is returned if there is a signal pending that is not being ignored
5321 * and the mount is interruptable, or if we are a thread that is in the process
5322 * of cancellation (also SIGKILL posted).
5324 extern int sigprop
[NSIG
+ 1];
5326 nfs_sigintr(struct nfsmount
*nmp
, struct nfsreq
*req
, thread_t thd
, int nmplocked
)
5335 if (req
&& (req
->r_flags
& R_SOFTTERM
)) {
5336 return ETIMEDOUT
; /* request has been terminated. */
5338 if (req
&& (req
->r_flags
& R_NOINTR
)) {
5339 thd
= NULL
; /* don't check for signal on R_NOINTR */
5342 lck_mtx_lock(&nmp
->nm_lock
);
5344 if (nmp
->nm_state
& NFSSTA_FORCE
) {
5345 /* If a force unmount is in progress then fail. */
5347 } else if (vfs_isforce(nmp
->nm_mountp
)) {
5348 /* Someone is unmounting us, go soft and mark it. */
5349 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_SOFT
);
5350 nmp
->nm_state
|= NFSSTA_FORCE
;
5353 /* Check if the mount is marked dead. */
5354 if (!error
&& (nmp
->nm_state
& NFSSTA_DEAD
)) {
5359 * If the mount is hung and we've requested not to hang
5360 * on remote filesystems, then bail now.
5362 if (current_proc() != kernproc
&&
5363 !error
&& (nmp
->nm_state
& NFSSTA_TIMEO
) && nfs_noremotehang(thd
)) {
5368 lck_mtx_unlock(&nmp
->nm_lock
);
5374 /* may not have a thread for async I/O */
5375 if (thd
== NULL
|| current_proc() == kernproc
) {
5380 * Check if the process is aborted, but don't interrupt if we
5381 * were killed by a signal and this is the exiting thread which
5382 * is attempting to dump core.
5384 if (((p
= current_proc()) != kernproc
) && current_thread_aborted() &&
5385 (!(p
->p_acflag
& AXSIG
) || (p
->exit_thread
!= current_thread()) ||
5386 (p
->p_sigacts
== NULL
) ||
5387 (p
->p_sigacts
->ps_sig
< 1) || (p
->p_sigacts
->ps_sig
> NSIG
) ||
5388 !(sigprop
[p
->p_sigacts
->ps_sig
] & SA_CORE
))) {
5392 /* mask off thread and process blocked signals. */
5393 if (NMFLAG(nmp
, INTR
) && ((p
= get_bsdthreadtask_info(thd
))) &&
5394 proc_pendingsignals(p
, NFSINT_SIGMASK
)) {
5401 * Lock a socket against others.
5402 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5403 * and also to avoid race conditions between the processes with nfs requests
5404 * in progress when a reconnect is necessary.
5407 nfs_sndlock(struct nfsreq
*req
)
5409 struct nfsmount
*nmp
= req
->r_nmp
;
5411 int error
= 0, slpflag
= 0;
5412 struct timespec ts
= { 0, 0 };
5414 if (nfs_mount_gone(nmp
)) {
5418 lck_mtx_lock(&nmp
->nm_lock
);
5419 statep
= &nmp
->nm_state
;
5421 if (NMFLAG(nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
5424 while (*statep
& NFSSTA_SNDLOCK
) {
5425 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 1))) {
5428 *statep
|= NFSSTA_WANTSND
;
5429 if (nfs_noremotehang(req
->r_thread
)) {
5432 msleep(statep
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsndlck", &ts
);
5433 if (slpflag
== PCATCH
) {
5439 *statep
|= NFSSTA_SNDLOCK
;
5441 lck_mtx_unlock(&nmp
->nm_lock
);
5446 * Unlock the stream socket for others.
5449 nfs_sndunlock(struct nfsreq
*req
)
5451 struct nfsmount
*nmp
= req
->r_nmp
;
5452 int *statep
, wake
= 0;
5457 lck_mtx_lock(&nmp
->nm_lock
);
5458 statep
= &nmp
->nm_state
;
5459 if ((*statep
& NFSSTA_SNDLOCK
) == 0) {
5460 panic("nfs sndunlock");
5462 *statep
&= ~(NFSSTA_SNDLOCK
| NFSSTA_SENDING
);
5463 if (*statep
& NFSSTA_WANTSND
) {
5464 *statep
&= ~NFSSTA_WANTSND
;
5467 lck_mtx_unlock(&nmp
->nm_lock
);
5475 struct nfsmount
*nmp
,
5477 struct sockaddr
*saddr
,
5484 struct nfsm_chain
*nmrep
)
5486 int error
= 0, on
= 1, try, sendat
= 2, soproto
, recv
, optlen
, restoreto
= 0;
5487 socket_t newso
= NULL
;
5488 struct sockaddr_storage ss
;
5489 struct timeval orig_rcvto
, orig_sndto
, tv
= { 1, 0 };
5490 mbuf_t m
, mrep
= NULL
;
5492 uint32_t rxid
= 0, reply
= 0, reply_status
, rejected_status
;
5493 uint32_t verf_type
, verf_len
, accepted_status
;
5494 size_t readlen
, sentlen
;
5495 struct nfs_rpc_record_state nrrs
;
5498 /* create socket and set options */
5499 soproto
= (sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
;
5500 if ((error
= sock_socket(saddr
->sa_family
, sotype
, soproto
, NULL
, NULL
, &newso
))) {
5505 int level
= (saddr
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
5506 int optname
= (saddr
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
5507 int portrange
= IP_PORTRANGE_LOW
;
5508 error
= sock_setsockopt(newso
, level
, optname
, &portrange
, sizeof(portrange
));
5510 ss
.ss_len
= saddr
->sa_len
;
5511 ss
.ss_family
= saddr
->sa_family
;
5512 if (ss
.ss_family
== AF_INET
) {
5513 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
5514 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
5515 } else if (ss
.ss_family
== AF_INET6
) {
5516 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
5517 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
5522 error
= sock_bind(newso
, (struct sockaddr
*)&ss
);
5527 if (sotype
== SOCK_STREAM
) {
5528 # define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5531 error
= sock_connect(newso
, saddr
, MSG_DONTWAIT
);
5532 if (error
== EINPROGRESS
) {
5537 while ((error
= sock_connectwait(newso
, &tv
)) == EINPROGRESS
) {
5538 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5539 if (++count
>= NFS_AUX_CONNECTION_TIMEOUT
) {
5546 if (((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
)))) ||
5547 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
)))) ||
5548 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
))))) {
5553 /* make sure socket is using a one second timeout in this function */
5554 optlen
= sizeof(orig_rcvto
);
5555 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, &optlen
);
5557 optlen
= sizeof(orig_sndto
);
5558 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, &optlen
);
5561 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
));
5562 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
));
5567 if (sotype
== SOCK_STREAM
) {
5568 sendat
= 0; /* we only resend the request for UDP */
5569 nfs_rpc_record_state_init(&nrrs
);
5572 for (try = 0; try < timeo
; try++) {
5573 if ((error
= nfs_sigintr(nmp
, NULL
, !try ? NULL
: thd
, 0))) {
5576 if (!try || (try == sendat
)) {
5577 /* send the request (resending periodically for UDP) */
5578 if ((error
= mbuf_copym(mreq
, 0, MBUF_COPYALL
, MBUF_WAITOK
, &m
))) {
5581 bzero(&msg
, sizeof(msg
));
5582 if ((sotype
== SOCK_DGRAM
) && !sock_isconnected(so
)) {
5583 msg
.msg_name
= saddr
;
5584 msg
.msg_namelen
= saddr
->sa_len
;
5586 if ((error
= sock_sendmbuf(so
, &msg
, m
, 0, &sentlen
))) {
5594 /* wait for the response */
5595 if (sotype
== SOCK_STREAM
) {
5596 /* try to read (more of) record */
5597 error
= nfs_rpc_record_read(so
, &nrrs
, 0, &recv
, &mrep
);
5598 /* if we don't have the whole record yet, we'll keep trying */
5601 bzero(&msg
, sizeof(msg
));
5602 error
= sock_receivembuf(so
, &msg
, &mrep
, 0, &readlen
);
5604 if (error
== EWOULDBLOCK
) {
5608 /* parse the response */
5609 nfsm_chain_dissect_init(error
, nmrep
, mrep
);
5610 nfsm_chain_get_32(error
, nmrep
, rxid
);
5611 nfsm_chain_get_32(error
, nmrep
, reply
);
5613 if ((rxid
!= xid
) || (reply
!= RPC_REPLY
)) {
5616 nfsm_chain_get_32(error
, nmrep
, reply_status
);
5618 if (reply_status
== RPC_MSGDENIED
) {
5619 nfsm_chain_get_32(error
, nmrep
, rejected_status
);
5621 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
5624 nfsm_chain_get_32(error
, nmrep
, verf_type
); /* verifier flavor */
5625 nfsm_chain_get_32(error
, nmrep
, verf_len
); /* verifier length */
5628 nfsm_chain_adv(error
, nmrep
, nfsm_rndup(verf_len
));
5630 nfsm_chain_get_32(error
, nmrep
, accepted_status
);
5632 switch (accepted_status
) {
5636 case RPC_PROGUNAVAIL
:
5637 error
= EPROGUNAVAIL
;
5639 case RPC_PROGMISMATCH
:
5640 error
= EPROGMISMATCH
;
5642 case RPC_PROCUNAVAIL
:
5643 error
= EPROCUNAVAIL
;
5648 case RPC_SYSTEM_ERR
:
5657 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, sizeof(tv
));
5658 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, sizeof(tv
));
5661 sock_shutdown(newso
, SHUT_RDWR
);
5670 struct nfsmount
*nmp
,
5672 struct sockaddr
*sa
,
5679 thread_t thd
= vfs_context_thread(ctx
);
5680 kauth_cred_t cred
= vfs_context_ucred(ctx
);
5681 struct sockaddr_storage ss
;
5682 struct sockaddr
*saddr
= (struct sockaddr
*)&ss
;
5683 struct nfsm_chain nmreq
, nmrep
;
5685 int error
= 0, ip
, pmprog
, pmvers
, pmproc
;
5689 char uaddr
[MAX_IPv6_STR_LEN
+ 16];
5691 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5692 if (saddr
->sa_family
== AF_INET
) {
5696 pmproc
= PMAPPROC_GETPORT
;
5697 } else if (saddr
->sa_family
== AF_INET6
) {
5701 pmproc
= RPCBPROC_GETVERSADDR
;
5705 nfsm_chain_null(&nmreq
);
5706 nfsm_chain_null(&nmrep
);
5709 /* send portmapper request to get port/uaddr */
5711 ((struct sockaddr_in
*)saddr
)->sin_port
= htons(PMAPPORT
);
5713 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(PMAPPORT
);
5715 nfsm_chain_build_alloc_init(error
, &nmreq
, 8 * NFSX_UNSIGNED
);
5716 nfsm_chain_add_32(error
, &nmreq
, protocol
);
5717 nfsm_chain_add_32(error
, &nmreq
, vers
);
5719 nfsm_chain_add_32(error
, &nmreq
, ipproto
);
5720 nfsm_chain_add_32(error
, &nmreq
, 0);
5722 if (ipproto
== IPPROTO_TCP
) {
5723 nfsm_chain_add_string(error
, &nmreq
, "tcp6", 4);
5725 nfsm_chain_add_string(error
, &nmreq
, "udp6", 4);
5727 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* uaddr */
5728 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* owner */
5730 nfsm_chain_build_done(error
, &nmreq
);
5732 error
= nfsm_rpchead2(nmp
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5733 pmprog
, pmvers
, pmproc
, RPCAUTH_SYS
, cred
, NULL
, nmreq
.nmc_mhead
,
5736 nmreq
.nmc_mhead
= NULL
;
5737 error
= nfs_aux_request(nmp
, thd
, saddr
, so
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5738 mreq
, R_XID32(xid
), 0, timeo
, &nmrep
);
5740 /* grab port from portmap response */
5742 nfsm_chain_get_32(error
, &nmrep
, port
);
5744 ((struct sockaddr_in
*)sa
)->sin_port
= htons(port
);
5747 /* get uaddr string and convert to sockaddr */
5748 nfsm_chain_get_32(error
, &nmrep
, ualen
);
5750 if (ualen
> (sizeof(uaddr
) - 1)) {
5754 /* program is not available, just return a zero port */
5755 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5756 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(0);
5758 nfsm_chain_get_opaque(error
, &nmrep
, ualen
, uaddr
);
5760 uaddr
[ualen
] = '\0';
5761 if (!nfs_uaddr2sockaddr(uaddr
, saddr
)) {
5767 if ((error
== EPROGMISMATCH
) || (error
== EPROCUNAVAIL
) || (error
== EIO
) || (error
== EBADRPC
)) {
5768 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
5769 if (pmvers
== RPCBVERS4
) {
5770 /* fall back to v3 and GETADDR */
5772 pmproc
= RPCBPROC_GETADDR
;
5773 nfsm_chain_cleanup(&nmreq
);
5774 nfsm_chain_cleanup(&nmrep
);
5775 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5782 bcopy(saddr
, sa
, min(saddr
->sa_len
, sa
->sa_len
));
5786 nfsm_chain_cleanup(&nmreq
);
5787 nfsm_chain_cleanup(&nmrep
);
5792 nfs_msg(thread_t thd
,
5797 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5801 tpr
= tprintf_open(p
);
5806 tprintf(tpr
, "nfs server %s: %s, error %d\n", server
, msg
, error
);
5808 tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);
5814 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
5815 #define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */
5816 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
5817 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
5818 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
5820 uint32_t nfs_squishy_flags
= NFS_SQUISH_MOBILE_ONLY
| NFS_SQUISH_AUTOMOUNTED_ONLY
| NFS_SQUISH_QUICK
;
5821 int32_t nfs_is_mobile
;
5823 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
5824 #define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/
5827 * Could this mount be squished?
5830 nfs_can_squish(struct nfsmount
*nmp
)
5832 uint64_t flags
= vfs_flags(nmp
->nm_mountp
);
5833 int softsquish
= ((nfs_squishy_flags
& NFS_SQUISH_SOFT
) & NMFLAG(nmp
, SOFT
));
5835 if (!softsquish
&& (nfs_squishy_flags
& NFS_SQUISH_MOBILE_ONLY
) && nfs_is_mobile
== 0) {
5839 if ((nfs_squishy_flags
& NFS_SQUISH_AUTOMOUNTED_ONLY
) && (flags
& MNT_AUTOMOUNTED
) == 0) {
5847 * NFS mounts default to "rw,hard" - but frequently on mobile clients
5848 * the mount may become "not responding". It's desirable to be able
5849 * to unmount these dead mounts, but only if there is no risk of
5850 * losing data or crashing applications. A "squishy" NFS mount is one
5851 * that can be force unmounted with little risk of harm.
5853 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
5854 * in a squishy state iff it is allowed to be squishy and there are no
5855 * dirty pages and there are no mmapped files and there are no files
5856 * open for write. Mounts are allowed to be squishy is controlled by
5857 * the settings of the nfs_squishy_flags and its mobility state. These
5858 * flags can be set by sysctls.
5860 * If nfs_is_squishy determines that we are in a squishy state we will
5861 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
5862 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
5863 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
5864 * update the current dead timeout with the mount's nm_deadtimeout
5865 * value set at mount time.
5867 * Assumes that nm_lock is held.
5869 * Note this routine is racey, but its effects on setting the
5870 * dead timeout only have effects when we're in trouble and are likely
5871 * to stay that way. Since by default its only for automounted
5872 * volumes on mobile machines; this is a reasonable trade off between
5873 * data integrity and user experience. It can be disabled or set via
5878 nfs_is_squishy(struct nfsmount
*nmp
)
5880 mount_t mp
= nmp
->nm_mountp
;
5882 int timeo
= (nfs_squishy_flags
& NFS_SQUISH_QUICK
) ? NFS_SQUISHY_QUICKTIMEOUT
: NFS_SQUISHY_DEADTIMEOUT
;
5884 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
5885 vfs_statfs(mp
)->f_mntfromname
, nmp
->nm_curdeadtimeout
, nfs_is_mobile
);
5887 if (!nfs_can_squish(nmp
)) {
5891 timeo
= (nmp
->nm_deadtimeout
> timeo
) ? max(nmp
->nm_deadtimeout
/ 8, timeo
) : timeo
;
5892 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp
->nm_writers
, nmp
->nm_mappers
, timeo
);
5894 if (nmp
->nm_writers
== 0 && nmp
->nm_mappers
== 0) {
5895 uint64_t flags
= mp
? vfs_flags(mp
) : 0;
5899 * Walk the nfs nodes and check for dirty buffers it we're not
5900 * RDONLY and we've not already been declared as squishy since
5901 * this can be a bit expensive.
5903 if (!(flags
& MNT_RDONLY
) && !(nmp
->nm_state
& NFSSTA_SQUISHY
)) {
5904 squishy
= !nfs_mount_is_dirty(mp
);
5910 nmp
->nm_state
|= NFSSTA_SQUISHY
;
5912 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
5915 nmp
->nm_curdeadtimeout
= squishy
? timeo
: nmp
->nm_deadtimeout
;
5917 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp
->nm_curdeadtimeout
);
5923 * On a send operation, if we can't reach the server and we've got only one server to talk to
5924 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
5925 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
5928 nfs_is_dead(int error
, struct nfsmount
*nmp
)
5932 lck_mtx_lock(&nmp
->nm_lock
);
5933 if (nmp
->nm_state
& NFSSTA_DEAD
) {
5934 lck_mtx_unlock(&nmp
->nm_lock
);
5938 if ((error
!= ENETUNREACH
&& error
!= EHOSTUNREACH
&& error
!= EADDRNOTAVAIL
) ||
5939 !(nmp
->nm_locations
.nl_numlocs
== 1 && nmp
->nm_locations
.nl_locations
[0]->nl_servcount
== 1)) {
5940 lck_mtx_unlock(&nmp
->nm_lock
);
5944 if ((nfs_squishy_flags
& NFS_SQUISH_QUICK
) && nfs_is_squishy(nmp
)) {
5945 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
5946 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
5947 lck_mtx_unlock(&nmp
->nm_lock
);
5948 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
5949 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
5952 lck_mtx_unlock(&nmp
->nm_lock
);
5957 * If we've experienced timeouts and we're not really a
5958 * classic hard mount, then just return cached data to
5959 * the caller instead of likely hanging on an RPC.
5962 nfs_use_cache(struct nfsmount
*nmp
)
5965 *%%% We always let mobile users goto the cache,
5966 * perhaps we should not even require them to have
5969 int cache_ok
= (nfs_is_mobile
|| NMFLAG(nmp
, SOFT
) ||
5970 nfs_can_squish(nmp
) || nmp
->nm_deadtimeout
);
5972 int timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5975 * So if we have a timeout and we're not really a hard hard-mount,
5976 * return 1 to not get things out of the cache.
5979 return (nmp
->nm_state
& timeoutmask
) && cache_ok
;
5983 * Log a message that nfs or lockd server is unresponsive. Check if we
5984 * can be squished and if we can, or that our dead timeout has
5985 * expired, and we're not holding state, set our mount as dead, remove
5986 * our mount state and ask to be unmounted. If we are holding state
5987 * we're being called from the nfs_request_timer and will soon detect
5988 * that we need to unmount.
5991 nfs_down(struct nfsmount
*nmp
, thread_t thd
, int error
, int flags
, const char *msg
, int holding_state
)
5993 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
5994 uint32_t do_vfs_signal
= 0;
5997 if (nfs_mount_gone(nmp
)) {
6001 lck_mtx_lock(&nmp
->nm_lock
);
6003 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6004 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6005 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6007 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6009 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6010 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6012 if ((flags
& NFSSTA_TIMEO
) && !(nmp
->nm_state
& NFSSTA_TIMEO
)) {
6013 nmp
->nm_state
|= NFSSTA_TIMEO
;
6015 if ((flags
& NFSSTA_LOCKTIMEO
) && !(nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6016 nmp
->nm_state
|= NFSSTA_LOCKTIMEO
;
6018 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && !(nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6019 nmp
->nm_state
|= NFSSTA_JUKEBOXTIMEO
;
6022 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6024 nfs_is_squishy(nmp
);
6026 if (unresponsive
&& (nmp
->nm_curdeadtimeout
> 0)) {
6028 if (!wasunresponsive
) {
6029 nmp
->nm_deadto_start
= now
.tv_sec
;
6030 nfs_mount_sock_thread_wake(nmp
);
6031 } else if ((now
.tv_sec
- nmp
->nm_deadto_start
) > nmp
->nm_curdeadtimeout
&& !holding_state
) {
6032 if (!(nmp
->nm_state
& NFSSTA_DEAD
)) {
6033 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
6034 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
6036 do_vfs_signal
= VQ_DEAD
;
6039 lck_mtx_unlock(&nmp
->nm_lock
);
6041 if (do_vfs_signal
== VQ_DEAD
&& !(nmp
->nm_state
& NFSSTA_DEAD
)) {
6042 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
6043 } else if (softnobrowse
|| wasunresponsive
|| !unresponsive
) {
6046 do_vfs_signal
= VQ_NOTRESP
;
6048 if (do_vfs_signal
) {
6049 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, do_vfs_signal
, 0);
6052 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, error
);
6056 nfs_up(struct nfsmount
*nmp
, thread_t thd
, int flags
, const char *msg
)
6058 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
6061 if (nfs_mount_gone(nmp
)) {
6066 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, 0);
6069 lck_mtx_lock(&nmp
->nm_lock
);
6071 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6072 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6073 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6075 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6077 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6078 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6080 if ((flags
& NFSSTA_TIMEO
) && (nmp
->nm_state
& NFSSTA_TIMEO
)) {
6081 nmp
->nm_state
&= ~NFSSTA_TIMEO
;
6083 if ((flags
& NFSSTA_LOCKTIMEO
) && (nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6084 nmp
->nm_state
&= ~NFSSTA_LOCKTIMEO
;
6086 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && (nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6087 nmp
->nm_state
&= ~NFSSTA_JUKEBOXTIMEO
;
6090 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6092 nmp
->nm_deadto_start
= 0;
6093 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
6094 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
6095 lck_mtx_unlock(&nmp
->nm_lock
);
6100 do_vfs_signal
= (wasunresponsive
&& !unresponsive
);
6102 if (do_vfs_signal
) {
6103 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_NOTRESP
, 1);
6108 #endif /* NFSCLIENT */
6113 * Generate the rpc reply header
6114 * siz arg. is used to decide if adding a cluster is worthwhile
6118 struct nfsrv_descript
*nd
,
6119 __unused
struct nfsrv_sock
*slp
,
6120 struct nfsm_chain
*nmrepp
,
6125 struct nfsm_chain nmrep
;
6128 err
= nd
->nd_repstat
;
6129 if (err
&& (nd
->nd_vers
== NFS_VER2
)) {
6134 * If this is a big reply, use a cluster else
6135 * try and leave leading space for the lower level headers.
6137 siz
+= RPC_REPLYSIZ
;
6138 if (siz
>= nfs_mbuf_minclsize
) {
6139 error
= mbuf_getpacket(MBUF_WAITOK
, &mrep
);
6141 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mrep
);
6144 /* unable to allocate packet */
6145 /* XXX should we keep statistics for these errors? */
6148 if (siz
< nfs_mbuf_minclsize
) {
6149 /* leave space for lower level headers */
6150 tl
= mbuf_data(mrep
);
6151 tl
+= 80 / sizeof(*tl
); /* XXX max_hdr? XXX */
6152 mbuf_setdata(mrep
, tl
, 6 * NFSX_UNSIGNED
);
6154 nfsm_chain_init(&nmrep
, mrep
);
6155 nfsm_chain_add_32(error
, &nmrep
, nd
->nd_retxid
);
6156 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
6157 if (err
== ERPCMISMATCH
|| (err
& NFSERR_AUTHERR
)) {
6158 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
6159 if (err
& NFSERR_AUTHERR
) {
6160 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
6161 nfsm_chain_add_32(error
, &nmrep
, (err
& ~NFSERR_AUTHERR
));
6163 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
6164 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6165 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6169 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
6170 if (nd
->nd_gss_context
!= NULL
) {
6171 /* RPCSEC_GSS verifier */
6172 error
= nfs_gss_svc_verf_put(nd
, &nmrep
);
6174 nfsm_chain_add_32(error
, &nmrep
, RPC_SYSTEM_ERR
);
6178 /* RPCAUTH_NULL verifier */
6179 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
6180 nfsm_chain_add_32(error
, &nmrep
, 0);
6182 /* accepted status */
6185 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
6188 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
6189 /* XXX hard coded versions? */
6190 nfsm_chain_add_32(error
, &nmrep
, NFS_VER2
);
6191 nfsm_chain_add_32(error
, &nmrep
, NFS_VER3
);
6194 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
6197 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
6200 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
6201 if (nd
->nd_gss_context
!= NULL
) {
6202 error
= nfs_gss_svc_prepare_reply(nd
, &nmrep
);
6204 if (err
!= NFSERR_RETVOID
) {
6205 nfsm_chain_add_32(error
, &nmrep
,
6206 (err
? nfsrv_errmap(nd
, err
) : 0));
6213 nfsm_chain_build_done(error
, &nmrep
);
6215 /* error composing reply header */
6216 /* XXX should we keep statistics for these errors? */
6222 if ((err
!= 0) && (err
!= NFSERR_RETVOID
)) {
6223 OSAddAtomic64(1, &nfsstats
.srvrpc_errs
);
6229 * The nfs server send routine.
6231 * - return EINTR or ERESTART if interrupted by a signal
6232 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6233 * - do any cleanup required by recoverable socket errors (???)
6236 nfsrv_send(struct nfsrv_sock
*slp
, mbuf_t nam
, mbuf_t top
)
6239 socket_t so
= slp
->ns_so
;
6240 struct sockaddr
*sendnam
;
6243 bzero(&msg
, sizeof(msg
));
6244 if (nam
&& !sock_isconnected(so
) && (slp
->ns_sotype
!= SOCK_STREAM
)) {
6245 if ((sendnam
= mbuf_data(nam
))) {
6246 msg
.msg_name
= (caddr_t
)sendnam
;
6247 msg
.msg_namelen
= sendnam
->sa_len
;
6250 error
= sock_sendmbuf(so
, &msg
, top
, 0, NULL
);
6254 log(LOG_INFO
, "nfsd send error %d\n", error
);
6256 if ((error
== EWOULDBLOCK
) && (slp
->ns_sotype
== SOCK_STREAM
)) {
6257 error
= EPIPE
; /* zap TCP sockets if they time out on send */
6259 /* Handle any recoverable (soft) socket errors here. (???) */
6260 if (error
!= EINTR
&& error
!= ERESTART
&& error
!= EIO
&&
6261 error
!= EWOULDBLOCK
&& error
!= EPIPE
) {
6269 * Socket upcall routine for the nfsd sockets.
6270 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6271 * Essentially do as much as possible non-blocking, else punt and it will
6272 * be called with MBUF_WAITOK from an nfsd.
6275 nfsrv_rcv(socket_t so
, void *arg
, int waitflag
)
6277 struct nfsrv_sock
*slp
= arg
;
6279 if (!nfsd_thread_count
|| !(slp
->ns_flag
& SLP_VALID
)) {
6283 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
6284 nfsrv_rcv_locked(so
, slp
, waitflag
);
6285 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6288 nfsrv_rcv_locked(socket_t so
, struct nfsrv_sock
*slp
, int waitflag
)
6290 mbuf_t m
, mp
, mhck
, m2
;
6291 int ns_flag
= 0, error
;
6295 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
6296 if (waitflag
== MBUF_DONTWAIT
) {
6297 lck_rw_done(&slp
->ns_rwlock
);
6304 * Define this to test for nfsds handling this under heavy load.
6306 if (waitflag
== MBUF_DONTWAIT
) {
6307 ns_flag
= SLP_NEEDQ
;
6311 if (slp
->ns_sotype
== SOCK_STREAM
) {
6313 * If there are already records on the queue, defer soreceive()
6314 * to an(other) nfsd so that there is feedback to the TCP layer that
6315 * the nfs servers are heavily loaded.
6318 ns_flag
= SLP_NEEDQ
;
6325 bytes_read
= 1000000000;
6326 error
= sock_receivembuf(so
, NULL
, &mp
, MSG_DONTWAIT
, &bytes_read
);
6327 if (error
|| mp
== NULL
) {
6328 if (error
== EWOULDBLOCK
) {
6329 ns_flag
= (waitflag
== MBUF_DONTWAIT
) ? SLP_NEEDQ
: 0;
6331 ns_flag
= SLP_DISCONN
;
6336 if (slp
->ns_rawend
) {
6337 if ((error
= mbuf_setnext(slp
->ns_rawend
, m
))) {
6338 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error
);
6340 slp
->ns_cc
+= bytes_read
;
6343 slp
->ns_cc
= bytes_read
;
6345 while ((m2
= mbuf_next(m
))) {
6351 * Now try and parse record(s) out of the raw stream data.
6353 error
= nfsrv_getstream(slp
, waitflag
);
6355 if (error
== EPERM
) {
6356 ns_flag
= SLP_DISCONN
;
6358 ns_flag
= SLP_NEEDQ
;
6362 struct sockaddr_storage nam
;
6364 if (slp
->ns_reccnt
>= nfsrv_sock_max_rec_queue_length
) {
6365 /* already have max # RPC records queued on this socket */
6366 ns_flag
= SLP_NEEDQ
;
6370 bzero(&msg
, sizeof(msg
));
6371 msg
.msg_name
= (caddr_t
)&nam
;
6372 msg
.msg_namelen
= sizeof(nam
);
6375 bytes_read
= 1000000000;
6376 error
= sock_receivembuf(so
, &msg
, &mp
, MSG_DONTWAIT
| MSG_NEEDSA
, &bytes_read
);
6378 if (msg
.msg_name
&& (mbuf_get(MBUF_WAITOK
, MBUF_TYPE_SONAME
, &mhck
) == 0)) {
6379 mbuf_setlen(mhck
, nam
.ss_len
);
6380 bcopy(&nam
, mbuf_data(mhck
), nam
.ss_len
);
6382 if (mbuf_setnext(m
, mp
)) {
6383 /* trouble... just drop it */
6384 printf("nfsrv_rcv: mbuf_setnext failed\n");
6391 if (slp
->ns_recend
) {
6392 mbuf_setnextpkt(slp
->ns_recend
, m
);
6395 slp
->ns_flag
|= SLP_DOREC
;
6398 mbuf_setnextpkt(m
, NULL
);
6405 * Now try and process the request records, non-blocking.
6409 slp
->ns_flag
|= ns_flag
;
6411 if (waitflag
== MBUF_DONTWAIT
) {
6412 int wake
= (slp
->ns_flag
& SLP_WORKTODO
);
6413 lck_rw_done(&slp
->ns_rwlock
);
6414 if (wake
&& nfsd_thread_count
) {
6415 lck_mtx_lock(nfsd_mutex
);
6416 nfsrv_wakenfsd(slp
);
6417 lck_mtx_unlock(nfsd_mutex
);
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * is ok to block while doing so.
 */
6428 nfsrv_getstream(struct nfsrv_sock
*slp
, int waitflag
)
6431 char *cp1
, *cp2
, *mdata
;
6432 int len
, mlen
, error
;
6433 mbuf_t om
, m2
, recm
;
6436 if (slp
->ns_flag
& SLP_GETSTREAM
) {
6437 panic("nfs getstream");
6439 slp
->ns_flag
|= SLP_GETSTREAM
;
6441 if (slp
->ns_reclen
== 0) {
6442 if (slp
->ns_cc
< NFSX_UNSIGNED
) {
6443 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6447 mdata
= mbuf_data(m
);
6449 if (mlen
>= NFSX_UNSIGNED
) {
6450 bcopy(mdata
, (caddr_t
)&recmark
, NFSX_UNSIGNED
);
6451 mdata
+= NFSX_UNSIGNED
;
6452 mlen
-= NFSX_UNSIGNED
;
6453 mbuf_setdata(m
, mdata
, mlen
);
6455 cp1
= (caddr_t
)&recmark
;
6457 while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
6465 mbuf_setdata(m
, cp2
, mlen
);
6468 slp
->ns_cc
-= NFSX_UNSIGNED
;
6469 recmark
= ntohl(recmark
);
6470 slp
->ns_reclen
= recmark
& ~0x80000000;
6471 if (recmark
& 0x80000000) {
6472 slp
->ns_flag
|= SLP_LASTFRAG
;
6474 slp
->ns_flag
&= ~SLP_LASTFRAG
;
6476 if (slp
->ns_reclen
<= 0 || slp
->ns_reclen
> NFS_MAXPACKET
) {
6477 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6483 * Now get the record part.
6485 * Note that slp->ns_reclen may be 0. Linux sometimes
6486 * generates 0-length RPCs
6489 if (slp
->ns_cc
== slp
->ns_reclen
) {
6491 slp
->ns_raw
= slp
->ns_rawend
= NULL
;
6492 slp
->ns_cc
= slp
->ns_reclen
= 0;
6493 } else if (slp
->ns_cc
> slp
->ns_reclen
) {
6497 mdata
= mbuf_data(m
);
6499 while (len
< slp
->ns_reclen
) {
6500 if ((len
+ mlen
) > slp
->ns_reclen
) {
6501 if (mbuf_copym(m
, 0, slp
->ns_reclen
- len
, waitflag
, &m2
)) {
6502 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6506 if (mbuf_setnext(om
, m2
)) {
6507 /* trouble... just drop it */
6508 printf("nfsrv_getstream: mbuf_setnext failed\n");
6510 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6517 mdata
+= slp
->ns_reclen
- len
;
6518 mlen
-= slp
->ns_reclen
- len
;
6519 mbuf_setdata(m
, mdata
, mlen
);
6520 len
= slp
->ns_reclen
;
6521 } else if ((len
+ mlen
) == slp
->ns_reclen
) {
6526 if (mbuf_setnext(om
, NULL
)) {
6527 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
6528 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6532 mdata
= mbuf_data(m
);
6538 mdata
= mbuf_data(m
);
6545 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6550 * Accumulate the fragments into a record.
6552 if (slp
->ns_frag
== NULL
) {
6553 slp
->ns_frag
= recm
;
6556 while ((m2
= mbuf_next(m
))) {
6559 if ((error
= mbuf_setnext(m
, recm
))) {
6560 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error
);
6563 if (slp
->ns_flag
& SLP_LASTFRAG
) {
6564 if (slp
->ns_recend
) {
6565 mbuf_setnextpkt(slp
->ns_recend
, slp
->ns_frag
);
6567 slp
->ns_rec
= slp
->ns_frag
;
6568 slp
->ns_flag
|= SLP_DOREC
;
6570 slp
->ns_recend
= slp
->ns_frag
;
6571 slp
->ns_frag
= NULL
;
6577 * Parse an RPC header.
6581 struct nfsrv_sock
*slp
,
6583 struct nfsrv_descript
**ndp
)
6587 struct nfsrv_descript
*nd
;
6591 if (!(slp
->ns_flag
& (SLP_VALID
| SLP_DOREC
)) || (slp
->ns_rec
== NULL
)) {
6594 MALLOC_ZONE(nd
, struct nfsrv_descript
*,
6595 sizeof(struct nfsrv_descript
), M_NFSRVDESC
, M_WAITOK
);
6600 slp
->ns_rec
= mbuf_nextpkt(m
);
6602 mbuf_setnextpkt(m
, NULL
);
6604 slp
->ns_flag
&= ~SLP_DOREC
;
6605 slp
->ns_recend
= NULL
;
6608 if (mbuf_type(m
) == MBUF_TYPE_SONAME
) {
6611 if ((error
= mbuf_setnext(nam
, NULL
))) {
6612 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error
);
6618 nfsm_chain_dissect_init(error
, &nd
->nd_nmreq
, m
);
6620 error
= nfsrv_getreq(nd
);
6626 if (nd
->nd_gss_context
) {
6627 nfs_gss_svc_ctx_deref(nd
->nd_gss_context
);
6629 FREE_ZONE(nd
, sizeof(*nd
), M_NFSRVDESC
);
6639 * Parse an RPC request
6641 * - fill in the cred struct.
6644 nfsrv_getreq(struct nfsrv_descript
*nd
)
6646 struct nfsm_chain
*nmreq
;
6648 u_int32_t nfsvers
, auth_type
;
6656 nd
->nd_gss_context
= NULL
;
6657 nd
->nd_gss_seqnum
= 0;
6658 nd
->nd_gss_mb
= NULL
;
6660 user_id
= group_id
= -2;
6661 val
= auth_type
= len
= 0;
6663 nmreq
= &nd
->nd_nmreq
;
6664 nfsm_chain_get_32(error
, nmreq
, nd
->nd_retxid
); // XID
6665 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Call
6666 if (!error
&& (val
!= RPC_CALL
)) {
6671 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Version
6673 if (val
!= RPC_VER2
) {
6674 nd
->nd_repstat
= ERPCMISMATCH
;
6675 nd
->nd_procnum
= NFSPROC_NOOP
;
6678 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Program Number
6680 if (val
!= NFS_PROG
) {
6681 nd
->nd_repstat
= EPROGUNAVAIL
;
6682 nd
->nd_procnum
= NFSPROC_NOOP
;
6685 nfsm_chain_get_32(error
, nmreq
, nfsvers
);// NFS Version Number
6687 if ((nfsvers
< NFS_VER2
) || (nfsvers
> NFS_VER3
)) {
6688 nd
->nd_repstat
= EPROGMISMATCH
;
6689 nd
->nd_procnum
= NFSPROC_NOOP
;
6692 nd
->nd_vers
= nfsvers
;
6693 nfsm_chain_get_32(error
, nmreq
, nd
->nd_procnum
);// NFS Procedure Number
6695 if ((nd
->nd_procnum
>= NFS_NPROCS
) ||
6696 ((nd
->nd_vers
== NFS_VER2
) && (nd
->nd_procnum
> NFSV2PROC_STATFS
))) {
6697 nd
->nd_repstat
= EPROCUNAVAIL
;
6698 nd
->nd_procnum
= NFSPROC_NOOP
;
6701 if (nfsvers
!= NFS_VER3
) {
6702 nd
->nd_procnum
= nfsv3_procid
[nd
->nd_procnum
];
6704 nfsm_chain_get_32(error
, nmreq
, auth_type
); // Auth Flavor
6705 nfsm_chain_get_32(error
, nmreq
, len
); // Auth Length
6706 if (!error
&& (len
< 0 || len
> RPCAUTH_MAXSIZ
)) {
6711 /* Handle authentication */
6712 if (auth_type
== RPCAUTH_SYS
) {
6713 struct posix_cred temp_pcred
;
6714 if (nd
->nd_procnum
== NFSPROC_NULL
) {
6717 nd
->nd_sec
= RPCAUTH_SYS
;
6718 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // skip stamp
6719 nfsm_chain_get_32(error
, nmreq
, len
); // hostname length
6720 if (len
< 0 || len
> NFS_MAXNAMLEN
) {
6723 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
)); // skip hostname
6726 /* create a temporary credential using the bits from the wire */
6727 bzero(&temp_pcred
, sizeof(temp_pcred
));
6728 nfsm_chain_get_32(error
, nmreq
, user_id
);
6729 nfsm_chain_get_32(error
, nmreq
, group_id
);
6730 temp_pcred
.cr_groups
[0] = group_id
;
6731 nfsm_chain_get_32(error
, nmreq
, len
); // extra GID count
6732 if ((len
< 0) || (len
> RPCAUTH_UNIXGIDS
)) {
6736 for (i
= 1; i
<= len
; i
++) {
6738 nfsm_chain_get_32(error
, nmreq
, temp_pcred
.cr_groups
[i
]);
6740 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
);
6744 ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (len
+ 1);
6746 nfsrv_group_sort(&temp_pcred
.cr_groups
[0], ngroups
);
6748 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
6749 nfsm_chain_get_32(error
, nmreq
, len
); // verifier length
6750 if (len
< 0 || len
> RPCAUTH_MAXSIZ
) {
6754 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
));
6757 /* request creation of a real credential */
6758 temp_pcred
.cr_uid
= user_id
;
6759 temp_pcred
.cr_ngroups
= ngroups
;
6760 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
6761 if (nd
->nd_cr
== NULL
) {
6762 nd
->nd_repstat
= ENOMEM
;
6763 nd
->nd_procnum
= NFSPROC_NOOP
;
6766 } else if (auth_type
== RPCSEC_GSS
) {
6767 error
= nfs_gss_svc_cred_get(nd
, nmreq
);
6769 if (error
== EINVAL
) {
6770 goto nfsmout
; // drop the request
6772 nd
->nd_repstat
= error
;
6773 nd
->nd_procnum
= NFSPROC_NOOP
;
6777 if (nd
->nd_procnum
== NFSPROC_NULL
) { // assume it's AUTH_NONE
6780 nd
->nd_repstat
= (NFSERR_AUTHERR
| AUTH_REJECTCRED
);
6781 nd
->nd_procnum
= NFSPROC_NOOP
;
6786 if (IS_VALID_CRED(nd
->nd_cr
)) {
6787 kauth_cred_unref(&nd
->nd_cr
);
6789 nfsm_chain_cleanup(nmreq
);
6794 * Search for a sleeping nfsd and wake it up.
6795 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
6796 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
6797 * Note: Must be called with nfsd_mutex held.
6800 nfsrv_wakenfsd(struct nfsrv_sock
*slp
)
6804 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
6808 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
6809 /* if there's work to do on this socket, make sure it's queued up */
6810 if ((slp
->ns_flag
& SLP_WORKTODO
) && !(slp
->ns_flag
& SLP_QUEUED
)) {
6811 TAILQ_INSERT_TAIL(&nfsrv_sockwait
, slp
, ns_svcq
);
6812 slp
->ns_flag
|= SLP_WAITQ
;
6814 lck_rw_done(&slp
->ns_rwlock
);
6816 /* wake up a waiting nfsd, if possible */
6817 nd
= TAILQ_FIRST(&nfsd_queue
);
6822 TAILQ_REMOVE(&nfsd_queue
, nd
, nfsd_queue
);
6823 nd
->nfsd_flag
&= ~NFSD_WAITING
;
6827 #endif /* NFSSERVER */