2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
69 * Socket operations for use by nfs
72 #include <sys/param.h>
73 #include <sys/systm.h>
75 #include <sys/signalvar.h>
76 #include <sys/kauth.h>
77 #include <sys/mount_internal.h>
78 #include <sys/kernel.h>
79 #include <sys/kpi_mbuf.h>
80 #include <sys/malloc.h>
81 #include <sys/vnode.h>
82 #include <sys/domain.h>
83 #include <sys/protosw.h>
84 #include <sys/socket.h>
85 #include <sys/syslog.h>
86 #include <sys/tprintf.h>
87 #include <libkern/OSAtomic.h>
90 #include <kern/clock.h>
91 #include <kern/task.h>
92 #include <kern/thread.h>
93 #include <kern/thread_call.h>
97 #include <netinet/in.h>
98 #include <netinet/tcp.h>
100 #include <nfs/rpcv2.h>
101 #include <nfs/krpc.h>
102 #include <nfs/nfsproto.h>
104 #include <nfs/xdr_subs.h>
105 #include <nfs/nfsm_subs.h>
106 #include <nfs/nfs_gss.h>
107 #include <nfs/nfsmount.h>
108 #include <nfs/nfsnode.h>
111 boolean_t
current_thread_aborted(void);
112 kern_return_t
thread_terminate(thread_t
);
116 int nfsrv_sock_max_rec_queue_length
= 128; /* max # RPC records queued on (UDP) socket */
118 int nfsrv_getstream(struct nfsrv_sock
*,int);
119 int nfsrv_getreq(struct nfsrv_descript
*);
120 extern int nfsv3_procid
[NFS_NPROCS
];
121 #endif /* NFSSERVER */
124 * compare two sockaddr structures
127 nfs_sockaddr_cmp(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
133 if (sa1
->sa_family
!= sa2
->sa_family
)
134 return ((sa1
->sa_family
< sa2
->sa_family
) ? -1 : 1);
135 if (sa1
->sa_len
!= sa2
->sa_len
)
136 return ((sa1
->sa_len
< sa2
->sa_len
) ? -1 : 1);
137 if (sa1
->sa_family
== AF_INET
)
138 return (bcmp(&((struct sockaddr_in
*)sa1
)->sin_addr
,
139 &((struct sockaddr_in
*)sa2
)->sin_addr
, sizeof(((struct sockaddr_in
*)sa1
)->sin_addr
)));
140 if (sa1
->sa_family
== AF_INET6
)
141 return (bcmp(&((struct sockaddr_in6
*)sa1
)->sin6_addr
,
142 &((struct sockaddr_in6
*)sa2
)->sin6_addr
, sizeof(((struct sockaddr_in6
*)sa1
)->sin6_addr
)));
148 int nfs_reconnect(struct nfsmount
*);
149 int nfs_connect_setup(struct nfsmount
*);
150 void nfs_mount_sock_thread(void *, wait_result_t
);
151 void nfs_udp_rcv(socket_t
, void*, int);
152 void nfs_tcp_rcv(socket_t
, void*, int);
153 void nfs_sock_poke(struct nfsmount
*);
154 void nfs_request_match_reply(struct nfsmount
*, mbuf_t
);
155 void nfs_reqdequeue(struct nfsreq
*);
156 void nfs_reqbusy(struct nfsreq
*);
157 struct nfsreq
*nfs_reqnext(struct nfsreq
*);
158 int nfs_wait_reply(struct nfsreq
*);
159 void nfs_softterm(struct nfsreq
*);
161 #ifdef NFS_SOCKET_DEBUGGING
162 #define NFS_SOCK_DBG(X) printf X
164 #define NFS_SOCK_DBG(X)
168 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
169 * Use the mean and mean deviation of rtt for the appropriate type of rpc
170 * for the frequent rpcs and a default for the others.
171 * The justification for doing "other" this way is that these rpcs
172 * happen so infrequently that timer est. would probably be stale.
173 * Also, since many of these rpcs are
174 * non-idempotent, a conservative timeout is desired.
175 * getattr, lookup - A+2D
179 #define NFS_RTO(n, t) \
180 ((t) == 0 ? (n)->nm_timeo : \
182 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
183 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
184 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
185 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
188 * Defines which timer to use for the procnum.
195 static int proct
[NFS_NPROCS
] = {
196 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0
200 * There is a congestion window for outstanding rpcs maintained per mount
201 * point. The cwnd size is adjusted in roughly the way that:
202 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
203 * SIGCOMM '88". ACM, August 1988.
204 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
205 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
206 * of rpcs is in progress.
207 * (The sent count and cwnd are scaled for integer arith.)
208 * Variants of "slow start" were tried and were found to be too much of a
209 * performance hit (ave. rtt 3 times larger),
210 * I suspect due to the large rtt that nfs rpcs have.
#define NFS_CWNDSCALE	256
#define NFS_MAXCWND	(NFS_CWNDSCALE * 32)
/* exponential backoff multipliers applied on successive retransmits */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256 };
217 * Increment location index to next address/server/location.
220 nfs_location_next(struct nfs_fs_locations
*nlp
, struct nfs_location_index
*nlip
)
222 uint8_t loc
= nlip
->nli_loc
;
223 uint8_t serv
= nlip
->nli_serv
;
224 uint8_t addr
= nlip
->nli_addr
;
226 /* move to next address */
228 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
229 /* no more addresses on current server, go to first address of next server */
233 if (serv
>= nlp
->nl_locations
[loc
]->nl_servcount
) {
234 /* no more servers on current location, go to first server of next location */
237 if (loc
>= nlp
->nl_numlocs
)
238 loc
= 0; /* after last location, wrap back around to first location */
242 * It's possible for this next server to not have any addresses.
243 * Check for that here and go to the next server.
244 * But bail out if we've managed to come back around to the original
245 * location that was passed in. (That would mean no servers had any
246 * addresses. And we don't want to spin here forever.)
248 if ((loc
== nlip
->nli_loc
) && (serv
== nlip
->nli_serv
) && (addr
== nlip
->nli_addr
))
250 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
)
254 nlip
->nli_serv
= serv
;
255 nlip
->nli_addr
= addr
;
259 * Compare two location indices.
262 nfs_location_index_cmp(struct nfs_location_index
*nlip1
, struct nfs_location_index
*nlip2
)
264 if (nlip1
->nli_loc
!= nlip2
->nli_loc
)
265 return (nlip1
->nli_loc
- nlip2
->nli_loc
);
266 if (nlip1
->nli_serv
!= nlip2
->nli_serv
)
267 return (nlip1
->nli_serv
- nlip2
->nli_serv
);
268 return (nlip1
->nli_addr
- nlip2
->nli_addr
);
272 * Get the mntfromname (or path portion only) for a given location.
275 nfs_location_mntfromname(struct nfs_fs_locations
*locs
, struct nfs_location_index idx
, char *s
, int size
, int pathonly
)
277 struct nfs_fs_location
*fsl
= locs
->nl_locations
[idx
.nli_loc
];
283 cnt
= snprintf(p
, size
, "%s:", fsl
->nl_servers
[idx
.nli_serv
]->ns_name
);
287 if (fsl
->nl_path
.np_compcount
== 0) {
288 /* mounting root export on server */
295 /* append each server path component */
296 for (i
=0; (size
> 0) && (i
< (int)fsl
->nl_path
.np_compcount
); i
++) {
297 cnt
= snprintf(p
, size
, "/%s", fsl
->nl_path
.np_components
[i
]);
304 * NFS client connect socket upcall.
305 * (Used only during socket connect/search.)
308 nfs_connect_upcall(socket_t so
, void *arg
, __unused
int waitflag
)
310 struct nfs_socket
*nso
= arg
;
313 int error
= 0, recv
= 1;
315 if (nso
->nso_flags
& NSO_CONNECTING
) {
316 NFS_SOCK_DBG(("nfs connect - socket %p upcall - connecting\n", nso
));
317 wakeup(nso
->nso_wake
);
321 lck_mtx_lock(&nso
->nso_lock
);
322 if ((nso
->nso_flags
& (NSO_UPCALL
|NSO_DISCONNECTING
|NSO_DEAD
)) || !(nso
->nso_flags
& NSO_PINGING
)) {
323 NFS_SOCK_DBG(("nfs connect - socket %p upcall - nevermind\n", nso
));
324 lck_mtx_unlock(&nso
->nso_lock
);
327 NFS_SOCK_DBG(("nfs connect - socket %p upcall\n", nso
));
328 nso
->nso_flags
|= NSO_UPCALL
;
330 /* loop while we make error-free progress */
331 while (!error
&& recv
) {
332 /* make sure we're still interested in this socket */
333 if (nso
->nso_flags
& (NSO_DISCONNECTING
|NSO_DEAD
))
335 lck_mtx_unlock(&nso
->nso_lock
);
337 if (nso
->nso_sotype
== SOCK_STREAM
) {
338 error
= nfs_rpc_record_read(so
, &nso
->nso_rrs
, MSG_DONTWAIT
, &recv
, &m
);
341 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
344 lck_mtx_lock(&nso
->nso_lock
);
346 /* match response with request */
347 struct nfsm_chain nmrep
;
348 uint32_t reply
= 0, rxid
= 0, verf_type
, verf_len
;
349 uint32_t reply_status
, rejected_status
, accepted_status
;
351 nfsm_chain_dissect_init(error
, &nmrep
, m
);
352 nfsm_chain_get_32(error
, &nmrep
, rxid
);
353 nfsm_chain_get_32(error
, &nmrep
, reply
);
354 if (!error
&& ((reply
!= RPC_REPLY
) || (rxid
!= nso
->nso_pingxid
)))
356 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
357 if (!error
&& (reply_status
== RPC_MSGDENIED
)) {
358 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
360 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
362 nfsm_chain_get_32(error
, &nmrep
, verf_type
); /* verifier flavor */
363 nfsm_chain_get_32(error
, &nmrep
, verf_len
); /* verifier length */
366 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
367 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
369 if ((accepted_status
== RPC_PROGMISMATCH
) && !nso
->nso_version
) {
370 uint32_t minvers
, maxvers
;
371 nfsm_chain_get_32(error
, &nmrep
, minvers
);
372 nfsm_chain_get_32(error
, &nmrep
, maxvers
);
374 if (nso
->nso_protocol
== PMAPPROG
) {
375 if ((minvers
> RPCBVERS4
) || (maxvers
< PMAPVERS
))
376 error
= EPROGMISMATCH
;
377 else if ((nso
->nso_saddr
->sa_family
== AF_INET
) &&
378 (PMAPVERS
>= minvers
) && (PMAPVERS
<= maxvers
))
379 nso
->nso_version
= PMAPVERS
;
380 else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
381 if ((RPCBVERS4
>= minvers
) && (RPCBVERS4
<= maxvers
))
382 nso
->nso_version
= RPCBVERS4
;
383 else if ((RPCBVERS3
>= minvers
) && (RPCBVERS3
<= maxvers
))
384 nso
->nso_version
= RPCBVERS3
;
386 } else if (nso
->nso_protocol
== NFS_PROG
) {
387 if ((minvers
> NFS_VER4
) || (maxvers
< NFS_VER2
))
388 error
= EPROGMISMATCH
;
389 else if ((NFS_VER3
>= minvers
) && (NFS_VER3
<= maxvers
))
390 nso
->nso_version
= NFS_VER3
;
391 else if ((NFS_VER2
>= minvers
) && (NFS_VER2
<= maxvers
))
392 nso
->nso_version
= NFS_VER2
;
393 else if ((NFS_VER4
>= minvers
) && (NFS_VER4
<= maxvers
))
394 nso
->nso_version
= NFS_VER4
;
396 if (!error
&& nso
->nso_version
)
397 accepted_status
= RPC_SUCCESS
;
400 switch (accepted_status
) {
404 case RPC_PROGUNAVAIL
:
405 error
= EPROGUNAVAIL
;
407 case RPC_PROGMISMATCH
:
408 error
= EPROGMISMATCH
;
410 case RPC_PROCUNAVAIL
:
411 error
= EPROCUNAVAIL
;
423 nso
->nso_flags
&= ~NSO_PINGING
;
425 nso
->nso_error
= error
;
426 nso
->nso_flags
|= NSO_DEAD
;
428 nso
->nso_flags
|= NSO_VERIFIED
;
431 /* wake up search thread */
432 wakeup(nso
->nso_wake
);
437 nso
->nso_flags
&= ~NSO_UPCALL
;
438 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
439 /* problems with the socket... */
440 nso
->nso_error
= error
? error
: EPIPE
;
441 nso
->nso_flags
|= NSO_DEAD
;
442 wakeup(nso
->nso_wake
);
444 if (nso
->nso_flags
& NSO_DISCONNECTING
)
445 wakeup(&nso
->nso_flags
);
446 lck_mtx_unlock(&nso
->nso_lock
);
450 * Create/initialize an nfs_socket structure.
454 __unused
struct nfsmount
*nmp
,
461 struct nfs_socket
**nsop
)
463 struct nfs_socket
*nso
;
466 #ifdef NFS_SOCKET_DEBUGGING
467 char naddr
[MAX_IPv6_STR_LEN
];
470 if (sa
->sa_family
== AF_INET
)
471 sinaddr
= &((struct sockaddr_in
*)sa
)->sin_addr
;
473 sinaddr
= &((struct sockaddr_in6
*)sa
)->sin6_addr
;
474 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
)
475 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
480 /* Create the socket. */
481 MALLOC(nso
, struct nfs_socket
*, sizeof(struct nfs_socket
), M_TEMP
, M_WAITOK
|M_ZERO
);
483 MALLOC(nso
->nso_saddr
, struct sockaddr
*, sa
->sa_len
, M_SONAME
, M_WAITOK
|M_ZERO
);
484 if (!nso
|| !nso
->nso_saddr
) {
489 lck_mtx_init(&nso
->nso_lock
, nfs_request_grp
, LCK_ATTR_NULL
);
490 nso
->nso_sotype
= sotype
;
491 if (nso
->nso_sotype
== SOCK_STREAM
)
492 nfs_rpc_record_state_init(&nso
->nso_rrs
);
494 nso
->nso_timestamp
= now
.tv_sec
;
495 bcopy(sa
, nso
->nso_saddr
, sa
->sa_len
);
496 if (sa
->sa_family
== AF_INET
)
497 ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
498 else if (sa
->sa_family
== AF_INET6
)
499 ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
500 nso
->nso_protocol
= protocol
;
501 nso
->nso_version
= vers
;
503 error
= sock_socket(sa
->sa_family
, nso
->nso_sotype
, 0, NULL
, NULL
, &nso
->nso_so
);
505 /* Some servers require that the client port be a reserved port number. */
506 if (!error
&& resvport
&& ((sa
->sa_family
== AF_INET
) || (sa
->sa_family
== AF_INET6
))) {
507 struct sockaddr_storage ss
;
508 int level
= (sa
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
509 int optname
= (sa
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
510 int portrange
= IP_PORTRANGE_LOW
;
512 error
= sock_setsockopt(nso
->nso_so
, level
, optname
, &portrange
, sizeof(portrange
));
513 if (!error
) { /* bind now to check for failure */
514 ss
.ss_len
= sa
->sa_len
;
515 ss
.ss_family
= sa
->sa_family
;
516 if (ss
.ss_family
== AF_INET
) {
517 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
518 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
519 } else if (ss
.ss_family
== AF_INET6
) {
520 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
521 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
526 error
= sock_bind(nso
->nso_so
, (struct sockaddr
*)&ss
);
531 NFS_SOCK_DBG(("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
532 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nso
, naddr
, sotype
,
533 resvport
? "r" : "", port
, protocol
, vers
));
534 nfs_socket_destroy(nso
);
536 NFS_SOCK_DBG(("nfs connect %s created socket %p %s type %d%s port %d prot %d %d\n",
537 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, naddr
,
538 sotype
, resvport
? "r" : "", port
, protocol
, vers
));
545 * Destroy an nfs_socket structure.
548 nfs_socket_destroy(struct nfs_socket
*nso
)
550 struct timespec ts
= { 4, 0 };
552 lck_mtx_lock(&nso
->nso_lock
);
553 nso
->nso_flags
|= NSO_DISCONNECTING
;
554 if (nso
->nso_flags
& NSO_UPCALL
) /* give upcall a chance to complete */
555 msleep(&nso
->nso_flags
, &nso
->nso_lock
, PZERO
-1, "nfswaitupcall", &ts
);
556 lck_mtx_unlock(&nso
->nso_lock
);
557 sock_shutdown(nso
->nso_so
, SHUT_RDWR
);
558 sock_close(nso
->nso_so
);
559 if (nso
->nso_sotype
== SOCK_STREAM
)
560 nfs_rpc_record_state_cleanup(&nso
->nso_rrs
);
561 lck_mtx_destroy(&nso
->nso_lock
, nfs_request_grp
);
563 FREE(nso
->nso_saddr
, M_SONAME
);
565 FREE(nso
->nso_saddr2
, M_SONAME
);
566 NFS_SOCK_DBG(("nfs connect - socket %p destroyed\n", nso
));
571 * Set common socket options on an nfs_socket.
574 nfs_socket_options(struct nfsmount
*nmp
, struct nfs_socket
*nso
)
577 * Set socket send/receive timeouts
578 * - Receive timeout shouldn't matter because most receives are performed
579 * in the socket upcall non-blocking.
580 * - Send timeout should allow us to react to a blocked socket.
581 * Soft mounts will want to abort sooner.
583 struct timeval timeo
;
587 timeo
.tv_sec
= NMFLAG(nmp
, SOFT
) ? 5 : 60;
588 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
589 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
590 if (nso
->nso_sotype
== SOCK_STREAM
) {
591 /* Assume that SOCK_STREAM always requires a connection */
592 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_KEEPALIVE
, &on
, sizeof(on
));
593 /* set nodelay for TCP */
594 sock_gettype(nso
->nso_so
, NULL
, NULL
, &proto
);
595 if (proto
== IPPROTO_TCP
)
596 sock_setsockopt(nso
->nso_so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
598 if (nso
->nso_sotype
== SOCK_DGRAM
) { /* set socket buffer sizes for UDP */
599 int reserve
= NFS_UDPSOCKBUF
;
600 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDBUF
, &reserve
, sizeof(reserve
));
601 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVBUF
, &reserve
, sizeof(reserve
));
603 /* set SO_NOADDRERR to detect network changes ASAP */
604 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
605 /* just playin' it safe with upcalls */
606 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
607 /* socket should be interruptible if the mount is */
608 if (!NMFLAG(nmp
, INTR
))
609 sock_nointerrupt(nso
->nso_so
, 1);
613 * Release resources held in an nfs_socket_search.
616 nfs_socket_search_cleanup(struct nfs_socket_search
*nss
)
618 struct nfs_socket
*nso
, *nsonext
;
620 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
621 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
623 nfs_socket_destroy(nso
);
626 nfs_socket_destroy(nss
->nss_sock
);
627 nss
->nss_sock
= NULL
;
632 * Prefer returning certain errors over others.
633 * This function returns a ranking of the given error.
636 nfs_connect_error_class(int error
)
671 * Make sure a socket search returns the best error.
674 nfs_socket_search_update_error(struct nfs_socket_search
*nss
, int error
)
676 if (nfs_connect_error_class(error
) >= nfs_connect_error_class(nss
->nss_error
))
677 nss
->nss_error
= error
;
681 * Continue the socket search until we have something to report.
684 nfs_connect_search_loop(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
)
686 struct nfs_socket
*nso
, *nsonext
;
688 struct nfs_fs_location
*fsl
;
689 struct nfs_fs_server
*fss
;
690 struct sockaddr_storage ss
;
692 int error
, nomore
= 0;
696 NFS_SOCK_DBG(("nfs connect %s search %ld\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, now
.tv_sec
));
698 /* Time to start another socket? */
699 while ((nss
->nss_last
< 0) || (nss
->nss_sockcnt
== 0) ||
700 ((nss
->nss_sockcnt
< 4) && (now
.tv_sec
>= (nss
->nss_last
+ 2)))) {
701 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
703 /* Find the next address to try... */
704 /* Have we run out of locations? */
705 if (!nomore
&& (nss
->nss_last
!= -1) && !nfs_location_index_cmp(&nss
->nss_nextloc
, &nss
->nss_startloc
))
708 if (nss
->nss_last
< 0)
709 nss
->nss_last
= now
.tv_sec
;
712 /* Can we convert the address to a sockaddr? */
713 fsl
= nmp
->nm_locations
.nl_locations
[nss
->nss_nextloc
.nli_loc
];
714 fss
= fsl
->nl_servers
[nss
->nss_nextloc
.nli_serv
];
715 addrstr
= fss
->ns_addresses
[nss
->nss_nextloc
.nli_addr
];
716 if (!nfs_uaddr2sockaddr(addrstr
, (struct sockaddr
*)&ss
)) {
717 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
721 /* Check that socket family is acceptable. */
722 if (nmp
->nm_sofamily
&& (ss
.ss_family
!= nmp
->nm_sofamily
)) {
723 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
728 /* Create the socket. */
729 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nss
->nss_sotype
,
730 nss
->nss_port
, nss
->nss_protocol
, nss
->nss_version
,
731 ((nss
->nss_protocol
== NFS_PROG
) && NMFLAG(nmp
, RESVPORT
)), &nso
);
735 nso
->nso_location
= nss
->nss_nextloc
;
737 error
= sock_setupcall(nso
->nso_so
, nfs_connect_upcall
, nso
);
739 lck_mtx_lock(&nso
->nso_lock
);
740 nso
->nso_error
= error
;
741 nso
->nso_flags
|= NSO_DEAD
;
742 lck_mtx_unlock(&nso
->nso_lock
);
745 TAILQ_INSERT_TAIL(&nss
->nss_socklist
, nso
, nso_link
);
747 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
749 nss
->nss_last
= now
.tv_sec
;
752 /* check each active socket and try to push it along */
753 TAILQ_FOREACH(nso
, &nss
->nss_socklist
, nso_link
) {
754 lck_mtx_lock(&nso
->nso_lock
);
755 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
756 if ((nso
->nso_sotype
!= SOCK_STREAM
) && NMFLAG(nmp
, NOCONNECT
)) {
757 /* no connection needed, just say it's already connected */
758 nso
->nso_flags
|= NSO_CONNECTED
;
759 NFS_SOCK_DBG(("nfs connect %s UDP socket %p noconnect\n",
760 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
761 } else if (!(nso
->nso_flags
& NSO_CONNECTING
)) {
762 /* initiate the connection */
763 nso
->nso_flags
|= NSO_CONNECTING
;
764 lck_mtx_unlock(&nso
->nso_lock
);
765 NFS_SOCK_DBG(("nfs connect %s connecting socket %p\n",
766 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
767 error
= sock_connect(nso
->nso_so
, nso
->nso_saddr
, MSG_DONTWAIT
);
768 lck_mtx_lock(&nso
->nso_lock
);
769 if (error
&& (error
!= EINPROGRESS
)) {
770 nso
->nso_error
= error
;
771 nso
->nso_flags
|= NSO_DEAD
;
772 lck_mtx_unlock(&nso
->nso_lock
);
776 if (nso
->nso_flags
& NSO_CONNECTING
) {
777 /* check the connection */
778 if (sock_isconnected(nso
->nso_so
)) {
779 NFS_SOCK_DBG(("nfs connect %s socket %p is connected\n",
780 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
781 nso
->nso_flags
&= ~NSO_CONNECTING
;
782 nso
->nso_flags
|= NSO_CONNECTED
;
784 int optlen
= sizeof(error
);
786 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &error
, &optlen
);
787 if (error
) { /* we got an error on the socket */
788 NFS_SOCK_DBG(("nfs connect %s socket %p connection error %d\n",
789 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
));
790 if (nss
->nss_flags
& NSS_VERBOSE
)
791 log(LOG_INFO
, "nfs_connect: socket error %d for %s\n",
792 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
793 nso
->nso_error
= error
;
794 nso
->nso_flags
|= NSO_DEAD
;
795 lck_mtx_unlock(&nso
->nso_lock
);
800 if (nso
->nso_flags
& NSO_CONNECTED
)
801 nfs_socket_options(nmp
, nso
);
803 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
804 lck_mtx_unlock(&nso
->nso_lock
);
807 if (!(nso
->nso_flags
& (NSO_PINGING
|NSO_VERIFIED
)) ||
808 ((nso
->nso_sotype
== SOCK_DGRAM
) && (now
.tv_sec
>= nso
->nso_reqtimestamp
+2))) {
809 /* initiate a NULL RPC request */
810 uint64_t xid
= nso
->nso_pingxid
;
811 mbuf_t m
, mreq
= NULL
;
813 size_t reqlen
, sentlen
;
816 if (!(vers
= nso
->nso_version
)) {
817 if (nso
->nso_protocol
== PMAPPROG
)
818 vers
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
819 else if (nso
->nso_protocol
== NFS_PROG
)
822 lck_mtx_unlock(&nso
->nso_lock
);
823 error
= nfsm_rpchead2(nmp
, nso
->nso_sotype
, nso
->nso_protocol
, vers
, 0, RPCAUTH_SYS
,
824 vfs_context_ucred(vfs_context_kernel()), NULL
, NULL
, &xid
, &mreq
);
825 lck_mtx_lock(&nso
->nso_lock
);
827 nso
->nso_flags
|= NSO_PINGING
;
828 nso
->nso_pingxid
= R_XID32(xid
);
829 nso
->nso_reqtimestamp
= now
.tv_sec
;
830 bzero(&msg
, sizeof(msg
));
831 if ((nso
->nso_sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
)) {
832 msg
.msg_name
= nso
->nso_saddr
;
833 msg
.msg_namelen
= nso
->nso_saddr
->sa_len
;
835 for (reqlen
=0, m
=mreq
; m
; m
= mbuf_next(m
))
836 reqlen
+= mbuf_len(m
);
837 lck_mtx_unlock(&nso
->nso_lock
);
838 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreq
, 0, &sentlen
);
839 NFS_SOCK_DBG(("nfs connect %s verifying socket %p send rv %d\n",
840 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
));
841 lck_mtx_lock(&nso
->nso_lock
);
842 if (!error
&& (sentlen
!= reqlen
))
846 nso
->nso_error
= error
;
847 nso
->nso_flags
|= NSO_DEAD
;
848 lck_mtx_unlock(&nso
->nso_lock
);
852 if (nso
->nso_flags
& NSO_VERIFIED
) {
853 /* WOOHOO!! This socket looks good! */
854 NFS_SOCK_DBG(("nfs connect %s socket %p verified\n",
855 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
856 if (!nso
->nso_version
) {
857 /* If the version isn't set, the default must have worked. */
858 if (nso
->nso_protocol
== PMAPPROG
)
859 nso
->nso_version
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
860 if (nso
->nso_protocol
== NFS_PROG
)
861 nso
->nso_version
= NFS_VER3
;
863 lck_mtx_unlock(&nso
->nso_lock
);
864 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
869 lck_mtx_unlock(&nso
->nso_lock
);
872 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
873 lck_mtx_lock(&nso
->nso_lock
);
874 if (now
.tv_sec
>= (nso
->nso_timestamp
+ nss
->nss_timeo
)) {
876 NFS_SOCK_DBG(("nfs connect %s socket %p timed out\n",
877 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
878 nso
->nso_error
= ETIMEDOUT
;
879 nso
->nso_flags
|= NSO_DEAD
;
881 if (!(nso
->nso_flags
& NSO_DEAD
)) {
882 lck_mtx_unlock(&nso
->nso_lock
);
885 lck_mtx_unlock(&nso
->nso_lock
);
886 NFS_SOCK_DBG(("nfs connect %s reaping socket %p %d\n",
887 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, nso
->nso_error
));
888 nfs_socket_search_update_error(nss
, nso
->nso_error
);
889 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
891 nfs_socket_destroy(nso
);
897 * Keep looping if we haven't found a socket yet and we have more
898 * sockets to (continue to) try.
901 if (!nss
->nss_sock
&& (!TAILQ_EMPTY(&nss
->nss_socklist
) || !nomore
)) {
902 /* log a warning if connect is taking a while */
903 if (((now
.tv_sec
- nss
->nss_timestamp
) >= 30) && ((nss
->nss_flags
& (NSS_VERBOSE
|NSS_WARNED
)) == NSS_VERBOSE
)) {
904 log(LOG_INFO
, "nfs_connect: socket connect taking a while for %s\n",
905 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
906 nss
->nss_flags
|= NSS_WARNED
;
908 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
910 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 0)))
912 if (nss
->nss_last
>= 0)
913 tsleep(nss
, PSOCK
, "nfs_connect_search_wait", hz
);
917 NFS_SOCK_DBG(("nfs connect %s returning %d\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
));
922 * Initialize a new NFS connection.
924 * Search for a location to connect a socket to and initialize the connection.
926 * An NFS mount may have multiple locations/servers/addresses available.
927 * We attempt to connect to each one asynchronously and will start
928 * several sockets in parallel if other locations are slow to answer.
929 * We'll use the first NFS socket we can successfully set up.
931 * The search may involve contacting the portmapper service first.
933 * A mount's initial connection may require negotiating some parameters such
934 * as socket type and NFS version.
937 nfs_connect(struct nfsmount
*nmp
, int verbose
, int timeo
)
939 struct nfs_socket_search nss
;
940 struct nfs_socket
*nso
, *nsonfs
;
941 struct sockaddr_storage ss
;
942 struct sockaddr
*saddr
, *oldsaddr
;
944 struct timeval now
, start
;
945 int error
, savederror
, nfsvers
;
946 uint8_t sotype
= nmp
->nm_sotype
? nmp
->nm_sotype
: SOCK_STREAM
;
947 fhandle_t
*fh
= NULL
;
951 /* paranoia... check that we have at least one address in the locations */
953 for (loc
=0; loc
< nmp
->nm_locations
.nl_numlocs
; loc
++) {
954 for (serv
=0; serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
; serv
++) {
955 if (nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
)
957 NFS_SOCK_DBG(("nfs connect %s search, server %s has no addresses\n",
958 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
959 nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_name
));
961 if (serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
)
964 if (loc
>= nmp
->nm_locations
.nl_numlocs
) {
965 NFS_SOCK_DBG(("nfs connect %s search failed, no addresses\n",
966 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
));
970 lck_mtx_lock(&nmp
->nm_lock
);
971 nmp
->nm_sockflags
|= NMSOCK_CONNECTING
;
973 lck_mtx_unlock(&nmp
->nm_lock
);
975 savederror
= error
= 0;
978 /* initialize socket search state */
979 bzero(&nss
, sizeof(nss
));
980 nss
.nss_error
= savederror
;
981 TAILQ_INIT(&nss
.nss_socklist
);
982 nss
.nss_sotype
= sotype
;
983 nss
.nss_startloc
= nmp
->nm_locations
.nl_current
;
984 nss
.nss_timestamp
= start
.tv_sec
;
985 nss
.nss_timeo
= timeo
;
987 nss
.nss_flags
|= NSS_VERBOSE
;
989 /* First time connecting, we may need to negotiate some things */
990 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
992 /* No NFS version specified... */
993 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
994 /* ...connect to portmapper first if we (may) need any ports. */
995 nss
.nss_port
= PMAPPORT
;
996 nss
.nss_protocol
= PMAPPROG
;
999 /* ...connect to NFS port first. */
1000 nss
.nss_port
= nmp
->nm_nfsport
;
1001 nss
.nss_protocol
= NFS_PROG
;
1002 nss
.nss_version
= 0;
1004 } else if (nmp
->nm_vers
>= NFS_VER4
) {
1005 /* For NFSv4, we use the given (or default) port. */
1006 nss
.nss_port
= nmp
->nm_nfsport
? nmp
->nm_nfsport
: NFS_PORT
;
1007 nss
.nss_protocol
= NFS_PROG
;
1008 nss
.nss_version
= 4;
1010 /* For NFSv3/v2... */
1011 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1012 /* ...connect to portmapper first if we need any ports. */
1013 nss
.nss_port
= PMAPPORT
;
1014 nss
.nss_protocol
= PMAPPROG
;
1015 nss
.nss_version
= 0;
1017 /* ...connect to NFS port first. */
1018 nss
.nss_port
= nmp
->nm_nfsport
;
1019 nss
.nss_protocol
= NFS_PROG
;
1020 nss
.nss_version
= nmp
->nm_vers
;
1023 NFS_SOCK_DBG(("nfs connect first %s, so type %d port %d prot %d %d\n",
1024 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1025 nss
.nss_protocol
, nss
.nss_version
));
1027 /* we've connected before, just connect to NFS port */
1028 if (!nmp
->nm_nfsport
) {
1029 /* need to ask portmapper which port that would be */
1030 nss
.nss_port
= PMAPPORT
;
1031 nss
.nss_protocol
= PMAPPROG
;
1032 nss
.nss_version
= 0;
1034 nss
.nss_port
= nmp
->nm_nfsport
;
1035 nss
.nss_protocol
= NFS_PROG
;
1036 nss
.nss_version
= nmp
->nm_vers
;
1038 NFS_SOCK_DBG(("nfs connect %s, so type %d port %d prot %d %d\n",
1039 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1040 nss
.nss_protocol
, nss
.nss_version
));
1043 /* Set next location to first valid location. */
1044 /* If start location is invalid, find next location. */
1045 nss
.nss_nextloc
= nss
.nss_startloc
;
1046 if ((nss
.nss_nextloc
.nli_serv
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servcount
) ||
1047 (nss
.nss_nextloc
.nli_addr
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servers
[nss
.nss_nextloc
.nli_serv
]->ns_addrcount
)) {
1048 nfs_location_next(&nmp
->nm_locations
, &nss
.nss_nextloc
);
1049 if (!nfs_location_index_cmp(&nss
.nss_nextloc
, &nss
.nss_startloc
)) {
1050 NFS_SOCK_DBG(("nfs connect %s search failed, couldn't find a valid location index\n",
1051 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
));
1059 error
= nfs_connect_search_loop(nmp
, &nss
);
1060 if (error
|| !nss
.nss_sock
) {
1062 nfs_socket_search_cleanup(&nss
);
1063 if (!error
&& (nss
.nss_sotype
== SOCK_STREAM
) && !nmp
->nm_sotype
&& (nmp
->nm_vers
< NFS_VER4
)) {
1065 sotype
= SOCK_DGRAM
;
1066 savederror
= nss
.nss_error
;
1067 NFS_SOCK_DBG(("nfs connect %s TCP failed %d %d, trying UDP\n",
1068 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
));
1072 error
= nss
.nss_error
? nss
.nss_error
: ETIMEDOUT
;
1073 lck_mtx_lock(&nmp
->nm_lock
);
1074 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1076 lck_mtx_unlock(&nmp
->nm_lock
);
1077 if (nss
.nss_flags
& NSS_WARNED
)
1078 log(LOG_INFO
, "nfs_connect: socket connect aborted for %s\n",
1079 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1083 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1084 NFS_SOCK_DBG(("nfs connect %s search failed, returning %d\n",
1085 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
));
1089 /* try to use nss_sock */
1091 nss
.nss_sock
= NULL
;
1093 /* We may be speaking to portmap first... to determine port(s). */
1094 if (nso
->nso_saddr
->sa_family
== AF_INET
)
1095 port
= ntohs(((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
);
1097 port
= ntohs(((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
);
1098 if (port
== PMAPPORT
) {
1099 /* Use this portmapper port to get the port #s we need. */
1100 NFS_SOCK_DBG(("nfs connect %s got portmapper socket %p\n",
1101 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
1103 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1104 sock_setupcall(nso
->nso_so
, NULL
, NULL
);
1106 /* Set up socket address and port for NFS socket. */
1107 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1109 /* If NFS version not set, try NFSv3 then NFSv2. */
1110 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: NFS_VER3
;
1112 if (!(port
= nmp
->nm_nfsport
)) {
1113 if (ss
.ss_family
== AF_INET
)
1114 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
1115 else if (ss
.ss_family
== AF_INET6
)
1116 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
1117 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1118 nso
->nso_so
, NFS_PROG
, nfsvers
,
1119 (nso
->nso_sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
, timeo
);
1121 if (ss
.ss_family
== AF_INET
)
1122 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1123 else if (ss
.ss_family
== AF_INET6
)
1124 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1126 error
= EPROGUNAVAIL
;
1128 if (error
&& !nmp
->nm_vers
) {
1130 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1131 nso
->nso_so
, NFS_PROG
, nfsvers
,
1132 (nso
->nso_sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
, timeo
);
1134 if (ss
.ss_family
== AF_INET
)
1135 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1136 else if (ss
.ss_family
== AF_INET6
)
1137 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1139 error
= EPROGUNAVAIL
;
1143 nfs_socket_search_update_error(&nss
, error
);
1144 nfs_socket_destroy(nso
);
1148 /* Create NFS protocol socket and add it to the list of sockets. */
1149 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nso
->nso_sotype
, port
,
1150 NFS_PROG
, nfsvers
, NMFLAG(nmp
, RESVPORT
), &nsonfs
);
1152 nfs_socket_search_update_error(&nss
, error
);
1153 nfs_socket_destroy(nso
);
1156 nsonfs
->nso_location
= nso
->nso_location
;
1157 nsonfs
->nso_wake
= &nss
;
1158 error
= sock_setupcall(nsonfs
->nso_so
, nfs_connect_upcall
, nsonfs
);
1160 nfs_socket_search_update_error(&nss
, error
);
1161 nfs_socket_destroy(nsonfs
);
1162 nfs_socket_destroy(nso
);
1165 TAILQ_INSERT_TAIL(&nss
.nss_socklist
, nsonfs
, nso_link
);
1167 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1168 /* Set up socket address and port for MOUNT socket. */
1170 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1171 port
= nmp
->nm_mountport
;
1172 if (ss
.ss_family
== AF_INET
)
1173 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1174 else if (ss
.ss_family
== AF_INET6
)
1175 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1177 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1178 /* If NFS version is unknown, optimistically choose for NFSv3. */
1179 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1180 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1181 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1182 nso
->nso_so
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1185 if (ss
.ss_family
== AF_INET
)
1186 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1187 else if (ss
.ss_family
== AF_INET6
)
1188 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1190 error
= EPROGUNAVAIL
;
1192 /* create sockaddr for MOUNT */
1194 MALLOC(nsonfs
->nso_saddr2
, struct sockaddr
*, ss
.ss_len
, M_SONAME
, M_WAITOK
|M_ZERO
);
1195 if (!error
&& !nsonfs
->nso_saddr2
)
1198 bcopy(&ss
, nsonfs
->nso_saddr2
, ss
.ss_len
);
1200 lck_mtx_lock(&nsonfs
->nso_lock
);
1201 nsonfs
->nso_error
= error
;
1202 nsonfs
->nso_flags
|= NSO_DEAD
;
1203 lck_mtx_unlock(&nsonfs
->nso_lock
);
1206 nfs_socket_destroy(nso
);
1210 /* nso is an NFS socket */
1211 NFS_SOCK_DBG(("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
));
1213 /* If NFS version wasn't specified, it was determined during the connect. */
1214 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: (int)nso
->nso_version
;
1216 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1217 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1219 saddr
= nso
->nso_saddr2
;
1221 /* Need sockaddr for MOUNT port */
1222 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1223 port
= nmp
->nm_mountport
;
1224 if (ss
.ss_family
== AF_INET
)
1225 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1226 else if (ss
.ss_family
== AF_INET6
)
1227 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1229 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1230 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1231 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1232 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1233 NULL
, RPCPROG_MNT
, mntvers
, mntproto
, timeo
);
1234 if (ss
.ss_family
== AF_INET
)
1235 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1236 else if (ss
.ss_family
== AF_INET6
)
1237 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1241 saddr
= (struct sockaddr
*)&ss
;
1243 error
= EPROGUNAVAIL
;
1247 MALLOC(fh
, fhandle_t
*, sizeof(fhandle_t
), M_TEMP
, M_WAITOK
|M_ZERO
);
1249 MALLOC_ZONE(path
, char *, MAXPATHLEN
, M_NAMEI
, M_WAITOK
);
1250 if (!saddr
|| !fh
|| !path
) {
1256 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1259 nfs_socket_search_update_error(&nss
, error
);
1260 nfs_socket_destroy(nso
);
1263 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
, path
, MAXPATHLEN
, 1);
1264 error
= nfs3_mount_rpc(nmp
, saddr
, nso
->nso_sotype
, nfsvers
,
1265 path
, vfs_context_current(), timeo
, fh
, &nmp
->nm_servsec
);
1266 NFS_SOCK_DBG(("nfs connect %s socket %p mount %d\n",
1267 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
));
1269 /* Make sure we can agree on a security flavor. */
1270 int o
, s
; /* indices into mount option and server security flavor lists */
1273 if ((nfsvers
== NFS_VER3
) && !nmp
->nm_servsec
.count
) {
1274 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1275 nmp
->nm_servsec
.count
= 1;
1276 nmp
->nm_servsec
.flavors
[0] = RPCAUTH_SYS
;
1278 if (nmp
->nm_sec
.count
) {
1279 /* Choose the first flavor in our list that the server supports. */
1280 if (!nmp
->nm_servsec
.count
) {
1281 /* we don't know what the server supports, just use our first choice */
1282 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[0];
1285 for (o
=0; !found
&& (o
< nmp
->nm_sec
.count
); o
++)
1286 for (s
=0; !found
&& (s
< nmp
->nm_servsec
.count
); s
++)
1287 if (nmp
->nm_sec
.flavors
[o
] == nmp
->nm_servsec
.flavors
[s
]) {
1288 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[o
];
1292 /* Choose the first one we support from the server's list. */
1293 if (!nmp
->nm_servsec
.count
) {
1294 nmp
->nm_auth
= RPCAUTH_SYS
;
1297 for (s
=0; s
< nmp
->nm_servsec
.count
; s
++)
1298 switch (nmp
->nm_servsec
.flavors
[s
]) {
1300 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1301 if (found
&& (nmp
->nm_auth
== RPCAUTH_NONE
))
1308 nmp
->nm_auth
= nmp
->nm_servsec
.flavors
[s
];
1314 error
= !found
? EAUTH
: 0;
1316 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1319 nfs_socket_search_update_error(&nss
, error
);
1322 nfs_socket_destroy(nso
);
1326 FREE(nmp
->nm_fh
, M_TEMP
);
1329 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_CALLUMNT
);
1332 /* put the real upcall in place */
1333 upcall
= (nso
->nso_sotype
== SOCK_STREAM
) ? nfs_tcp_rcv
: nfs_udp_rcv
;
1334 error
= sock_setupcall(nso
->nso_so
, upcall
, nmp
);
1336 nfs_socket_search_update_error(&nss
, error
);
1337 nfs_socket_destroy(nso
);
1341 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1342 /* set mntfromname to this location */
1343 if (!NM_OMATTR_GIVEN(nmp
, MNTFROM
))
1344 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
,
1345 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1346 sizeof(vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
), 0);
1347 /* some negotiated values need to remain unchanged for the life of the mount */
1348 if (!nmp
->nm_sotype
)
1349 nmp
->nm_sotype
= nso
->nso_sotype
;
1350 if (!nmp
->nm_vers
) {
1351 nmp
->nm_vers
= nfsvers
;
1352 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1353 if ((nfsvers
>= NFS_VER4
) && !NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1354 if (nso
->nso_saddr
->sa_family
== AF_INET
)
1355 port
= ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
1356 else if (nso
->nso_saddr
->sa_family
== AF_INET6
)
1357 port
= ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
1360 if (port
== NFS_PORT
)
1361 nmp
->nm_nfsport
= NFS_PORT
;
1364 /* do some version-specific pre-mount set up */
1365 if (nmp
->nm_vers
>= NFS_VER4
) {
1367 nmp
->nm_mounttime
= ((uint64_t)now
.tv_sec
<< 32) | now
.tv_usec
;
1368 if (!NMFLAG(nmp
, NOCALLBACK
))
1369 nfs4_mount_callback_setup(nmp
);
1373 /* Initialize NFS socket state variables */
1374 lck_mtx_lock(&nmp
->nm_lock
);
1375 nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] =
1376 nmp
->nm_srtt
[3] = (NFS_TIMEO
<< 3);
1377 nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
1378 nmp
->nm_sdrtt
[3] = 0;
1379 if (nso
->nso_sotype
== SOCK_DGRAM
) {
1380 nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
1382 } else if (nso
->nso_sotype
== SOCK_STREAM
) {
1383 nmp
->nm_timeouts
= 0;
1385 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1386 nmp
->nm_sockflags
|= NMSOCK_SETUP
;
1387 /* move the socket to the mount structure */
1389 oldsaddr
= nmp
->nm_saddr
;
1390 nmp
->nm_saddr
= nso
->nso_saddr
;
1391 lck_mtx_unlock(&nmp
->nm_lock
);
1392 error
= nfs_connect_setup(nmp
);
1393 lck_mtx_lock(&nmp
->nm_lock
);
1394 nmp
->nm_sockflags
&= ~NMSOCK_SETUP
;
1396 nmp
->nm_sockflags
|= NMSOCK_READY
;
1397 wakeup(&nmp
->nm_sockflags
);
1400 NFS_SOCK_DBG(("nfs connect %s socket %p setup failed %d\n",
1401 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
));
1402 nfs_socket_search_update_error(&nss
, error
);
1403 nmp
->nm_saddr
= oldsaddr
;
1404 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1405 /* undo settings made prior to setup */
1406 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_SOCKET_TYPE
))
1408 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_VERSION
)) {
1409 if (nmp
->nm_vers
>= NFS_VER4
) {
1410 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
))
1411 nmp
->nm_nfsport
= 0;
1413 nfs4_mount_callback_shutdown(nmp
);
1414 if (IS_VALID_CRED(nmp
->nm_mcred
))
1415 kauth_cred_unref(&nmp
->nm_mcred
);
1416 bzero(&nmp
->nm_un
, sizeof(nmp
->nm_un
));
1421 lck_mtx_unlock(&nmp
->nm_lock
);
1423 nfs_socket_destroy(nso
);
1427 /* update current location */
1428 if ((nmp
->nm_locations
.nl_current
.nli_flags
& NLI_VALID
) &&
1429 (nmp
->nm_locations
.nl_current
.nli_serv
!= nso
->nso_location
.nli_serv
)) {
1430 /* server has changed, we should initiate failover/recovery */
1433 nmp
->nm_locations
.nl_current
= nso
->nso_location
;
1434 nmp
->nm_locations
.nl_current
.nli_flags
|= NLI_VALID
;
1436 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1437 /* We have now successfully connected... make a note of it. */
1438 nmp
->nm_sockflags
|= NMSOCK_HASCONNECTED
;
1441 lck_mtx_unlock(&nmp
->nm_lock
);
1443 FREE(oldsaddr
, M_SONAME
);
1445 if (nss
.nss_flags
& NSS_WARNED
)
1446 log(LOG_INFO
, "nfs_connect: socket connect completed for %s\n",
1447 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1450 nfs_socket_search_cleanup(&nss
);
1454 FREE_ZONE(path
, MAXPATHLEN
, M_NAMEI
);
1455 NFS_SOCK_DBG(("nfs connect %s success\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
));
1460 /* setup & confirm socket connection is functional */
1462 nfs_connect_setup(struct nfsmount
*nmp
)
1466 if (nmp
->nm_vers
>= NFS_VER4
) {
1467 if (nmp
->nm_state
& NFSSTA_CLIENTID
) {
1468 /* first, try to renew our current state */
1469 error
= nfs4_renew(nmp
, R_SETUP
);
1470 if ((error
== NFSERR_ADMIN_REVOKED
) ||
1471 (error
== NFSERR_CB_PATH_DOWN
) ||
1472 (error
== NFSERR_EXPIRED
) ||
1473 (error
== NFSERR_LEASE_MOVED
) ||
1474 (error
== NFSERR_STALE_CLIENTID
)) {
1475 lck_mtx_lock(&nmp
->nm_lock
);
1476 nfs_need_recover(nmp
, error
);
1477 lck_mtx_unlock(&nmp
->nm_lock
);
1480 error
= nfs4_setclientid(nmp
);
1486 * NFS socket reconnect routine:
1487 * Called when a connection is broken.
1488 * - disconnect the old socket
1489 * - nfs_connect() again
1490 * - set R_MUSTRESEND for all outstanding requests on mount point
1491 * If this fails the mount point is DEAD!
1494 nfs_reconnect(struct nfsmount
*nmp
)
1498 thread_t thd
= current_thread();
1499 int error
, wentdown
= 0, verbose
= 1;
1503 lastmsg
= now
.tv_sec
- (nmp
->nm_tprintf_delay
- nmp
->nm_tprintf_initial_delay
);
1505 nfs_disconnect(nmp
);
1507 while ((error
= nfs_connect(nmp
, verbose
, 30))) {
1509 nfs_disconnect(nmp
);
1510 if ((error
== EINTR
) || (error
== ERESTART
))
1515 if ((lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
) {
1516 lastmsg
= now
.tv_sec
;
1517 nfs_down(nmp
, thd
, error
, NFSSTA_TIMEO
, "can not connect");
1520 lck_mtx_lock(&nmp
->nm_lock
);
1521 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1522 /* we're not yet completely mounted and */
1523 /* we can't reconnect, so we fail */
1524 lck_mtx_unlock(&nmp
->nm_lock
);
1527 nfs_mount_check_dead_timeout(nmp
);
1528 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1529 lck_mtx_unlock(&nmp
->nm_lock
);
1532 lck_mtx_unlock(&nmp
->nm_lock
);
1533 tsleep(&lbolt
, PSOCK
, "nfs_reconnect_delay", 0);
1534 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0)))
1539 nfs_up(nmp
, thd
, NFSSTA_TIMEO
, "connected");
1542 * Loop through outstanding request list and mark all requests
1543 * as needing a resend. (Though nfs_need_reconnect() probably
1544 * marked them all already.)
1546 lck_mtx_lock(nfs_request_mutex
);
1547 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1548 if (rq
->r_nmp
== nmp
) {
1549 lck_mtx_lock(&rq
->r_mtx
);
1550 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1551 rq
->r_flags
|= R_MUSTRESEND
;
1554 if ((rq
->r_flags
& (R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
1555 nfs_asyncio_resend(rq
);
1557 lck_mtx_unlock(&rq
->r_mtx
);
1560 lck_mtx_unlock(nfs_request_mutex
);
1565 * NFS disconnect. Clean up and unlink.
1568 nfs_disconnect(struct nfsmount
*nmp
)
1570 struct nfs_socket
*nso
;
1572 lck_mtx_lock(&nmp
->nm_lock
);
1575 struct timespec ts
= { 1, 0 };
1576 if (nmp
->nm_state
& NFSSTA_SENDING
) { /* wait for sending to complete */
1577 nmp
->nm_state
|= NFSSTA_WANTSND
;
1578 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, PZERO
-1, "nfswaitsending", &ts
);
1581 if (nmp
->nm_sockflags
& NMSOCK_POKE
) { /* wait for poking to complete */
1582 msleep(&nmp
->nm_sockflags
, &nmp
->nm_lock
, PZERO
-1, "nfswaitpoke", &ts
);
1585 nmp
->nm_sockflags
|= NMSOCK_DISCONNECTING
;
1586 nmp
->nm_sockflags
&= ~NMSOCK_READY
;
1589 if (nso
->nso_saddr
== nmp
->nm_saddr
)
1590 nso
->nso_saddr
= NULL
;
1591 lck_mtx_unlock(&nmp
->nm_lock
);
1592 nfs_socket_destroy(nso
);
1593 lck_mtx_lock(&nmp
->nm_lock
);
1594 nmp
->nm_sockflags
&= ~NMSOCK_DISCONNECTING
;
1595 lck_mtx_unlock(&nmp
->nm_lock
);
1597 lck_mtx_unlock(&nmp
->nm_lock
);
1602 * mark an NFS mount as needing a reconnect/resends.
1605 nfs_need_reconnect(struct nfsmount
*nmp
)
1609 lck_mtx_lock(&nmp
->nm_lock
);
1610 nmp
->nm_sockflags
&= ~(NMSOCK_READY
|NMSOCK_SETUP
);
1611 lck_mtx_unlock(&nmp
->nm_lock
);
1614 * Loop through outstanding request list and
1615 * mark all requests as needing a resend.
1617 lck_mtx_lock(nfs_request_mutex
);
1618 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1619 if (rq
->r_nmp
== nmp
) {
1620 lck_mtx_lock(&rq
->r_mtx
);
1621 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1622 rq
->r_flags
|= R_MUSTRESEND
;
1625 if ((rq
->r_flags
& (R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
1626 nfs_asyncio_resend(rq
);
1628 lck_mtx_unlock(&rq
->r_mtx
);
1631 lck_mtx_unlock(nfs_request_mutex
);
1636 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
1639 nfs_mount_sock_thread(void *arg
, __unused wait_result_t wr
)
1641 struct nfsmount
*nmp
= arg
;
1642 struct timespec ts
= { 30, 0 };
1643 thread_t thd
= current_thread();
1646 int error
, dofinish
;
1649 lck_mtx_lock(&nmp
->nm_lock
);
1651 while (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
1652 !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
1653 !LIST_EMPTY(&nmp
->nm_monlist
) ||
1654 nmp
->nm_deadto_start
||
1655 (nmp
->nm_state
& NFSSTA_RECOVER
) ||
1656 ((nmp
->nm_vers
>= NFS_VER4
) && !TAILQ_EMPTY(&nmp
->nm_dreturnq
)))
1658 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
)
1660 /* do reconnect, if necessary */
1661 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_FORCE
)) {
1662 if (nmp
->nm_reconnect_start
<= 0) {
1664 nmp
->nm_reconnect_start
= now
.tv_sec
;
1666 lck_mtx_unlock(&nmp
->nm_lock
);
1667 NFS_SOCK_DBG(("nfs reconnect %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
));
1668 if (nfs_reconnect(nmp
) == 0)
1669 nmp
->nm_reconnect_start
= 0;
1670 lck_mtx_lock(&nmp
->nm_lock
);
1672 if ((nmp
->nm_sockflags
& NMSOCK_READY
) &&
1673 (nmp
->nm_state
& NFSSTA_RECOVER
) &&
1674 !(nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
1675 !(nmp
->nm_state
& NFSSTA_FORCE
)) {
1676 /* perform state recovery */
1677 lck_mtx_unlock(&nmp
->nm_lock
);
1679 lck_mtx_lock(&nmp
->nm_lock
);
1681 /* handle NFSv4 delegation returns */
1682 while ((nmp
->nm_vers
>= NFS_VER4
) && !(nmp
->nm_state
& NFSSTA_FORCE
) &&
1683 (nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
) &&
1684 ((np
= TAILQ_FIRST(&nmp
->nm_dreturnq
)))) {
1685 lck_mtx_unlock(&nmp
->nm_lock
);
1686 nfs4_delegation_return(np
, R_RECOVER
, thd
, nmp
->nm_mcred
);
1687 lck_mtx_lock(&nmp
->nm_lock
);
1689 /* do resends, if necessary/possible */
1690 while ((((nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
)) || (nmp
->nm_state
& NFSSTA_FORCE
)) &&
1691 ((req
= TAILQ_FIRST(&nmp
->nm_resendq
)))) {
1692 if (req
->r_resendtime
)
1694 while (req
&& !(nmp
->nm_state
& NFSSTA_FORCE
) && req
->r_resendtime
&& (now
.tv_sec
< req
->r_resendtime
))
1695 req
= TAILQ_NEXT(req
, r_rchain
);
1698 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
1699 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
1700 lck_mtx_unlock(&nmp
->nm_lock
);
1701 lck_mtx_lock(&req
->r_mtx
);
1702 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
1703 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1704 req
->r_flags
&= ~R_RESENDQ
;
1706 lck_mtx_unlock(&req
->r_mtx
);
1708 nfs_asyncio_finish(req
);
1709 lck_mtx_lock(&nmp
->nm_lock
);
1712 if ((req
->r_flags
& R_RESTART
) || nfs_request_using_gss(req
)) {
1713 req
->r_flags
&= ~R_RESTART
;
1714 req
->r_resendtime
= 0;
1715 lck_mtx_unlock(&req
->r_mtx
);
1716 /* async RPCs on GSS mounts need to be rebuilt and resent. */
1717 nfs_reqdequeue(req
);
1718 if (nfs_request_using_gss(req
)) {
1719 nfs_gss_clnt_rpcdone(req
);
1720 error
= nfs_gss_clnt_args_restore(req
);
1721 if (error
== ENEEDAUTH
)
1724 NFS_SOCK_DBG(("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
1725 nfs_request_using_gss(req
) ? " gss" : "", req
->r_procnum
, req
->r_xid
,
1726 req
->r_flags
, req
->r_rtt
));
1727 error
= !req
->r_nmp
? ENXIO
: 0; /* unmounted? */
1729 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
1731 error
= nfs_request_add_header(req
);
1733 error
= nfs_request_send(req
, 0);
1734 lck_mtx_lock(&req
->r_mtx
);
1735 if (req
->r_flags
& R_RESENDQ
)
1736 req
->r_flags
&= ~R_RESENDQ
;
1738 req
->r_error
= error
;
1740 dofinish
= error
&& req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1741 lck_mtx_unlock(&req
->r_mtx
);
1743 nfs_asyncio_finish(req
);
1744 lck_mtx_lock(&nmp
->nm_lock
);
1748 NFS_SOCK_DBG(("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
1749 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
));
1750 error
= !req
->r_nmp
? ENXIO
: 0; /* unmounted? */
1752 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
1754 req
->r_flags
|= R_SENDING
;
1755 lck_mtx_unlock(&req
->r_mtx
);
1756 error
= nfs_send(req
, 0);
1757 lck_mtx_lock(&req
->r_mtx
);
1759 if (req
->r_flags
& R_RESENDQ
)
1760 req
->r_flags
&= ~R_RESENDQ
;
1762 lck_mtx_unlock(&req
->r_mtx
);
1763 lck_mtx_lock(&nmp
->nm_lock
);
1767 req
->r_error
= error
;
1768 if (req
->r_flags
& R_RESENDQ
)
1769 req
->r_flags
&= ~R_RESENDQ
;
1771 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
1772 lck_mtx_unlock(&req
->r_mtx
);
1774 nfs_asyncio_finish(req
);
1775 lck_mtx_lock(&nmp
->nm_lock
);
1777 if (nmp
->nm_deadto_start
)
1778 nfs_mount_check_dead_timeout(nmp
);
1779 if (nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))
1781 /* check monitored nodes, if necessary/possible */
1782 if (!LIST_EMPTY(&nmp
->nm_monlist
)) {
1783 nmp
->nm_state
|= NFSSTA_MONITOR_SCAN
;
1784 LIST_FOREACH(np
, &nmp
->nm_monlist
, n_monlink
) {
1785 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
|NFSSTA_FORCE
)))
1787 np
->n_mflag
|= NMMONSCANINPROG
;
1788 lck_mtx_unlock(&nmp
->nm_lock
);
1789 error
= nfs_getattr(np
, NULL
, vfs_context_kernel(), (NGA_UNCACHED
|NGA_MONITOR
));
1790 if (!error
&& ISSET(np
->n_flag
, NUPDATESIZE
)) /* update quickly to avoid multiple events */
1791 nfs_data_update_size(np
, 0);
1792 lck_mtx_lock(&nmp
->nm_lock
);
1793 np
->n_mflag
&= ~NMMONSCANINPROG
;
1794 if (np
->n_mflag
& NMMONSCANWANT
) {
1795 np
->n_mflag
&= ~NMMONSCANWANT
;
1796 wakeup(&np
->n_mflag
);
1798 if (error
|| !(nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
|NFSSTA_FORCE
)))
1801 nmp
->nm_state
&= ~NFSSTA_MONITOR_SCAN
;
1802 if (nmp
->nm_state
& NFSSTA_UNMOUNTING
)
1803 wakeup(&nmp
->nm_state
); /* let unmounting thread know scan is done */
1805 if ((nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
|NFSSTA_UNMOUNTING
))) {
1806 if (nmp
->nm_deadto_start
|| !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
1807 (nmp
->nm_state
& NFSSTA_RECOVER
))
1811 msleep(&nmp
->nm_sockthd
, &nmp
->nm_lock
, PSOCK
, "nfssockthread", &ts
);
1815 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
1816 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
1817 (nmp
->nm_state
& NFSSTA_MOUNTED
) && NMFLAG(nmp
, CALLUMNT
) &&
1818 (nmp
->nm_vers
< NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_DEAD
))) {
1819 lck_mtx_unlock(&nmp
->nm_lock
);
1820 nfs3_umount_rpc(nmp
, vfs_context_kernel(),
1821 (nmp
->nm_sockflags
& NMSOCK_READY
) ? 6 : 2);
1822 lck_mtx_lock(&nmp
->nm_lock
);
1825 if (nmp
->nm_sockthd
== thd
)
1826 nmp
->nm_sockthd
= NULL
;
1827 lck_mtx_unlock(&nmp
->nm_lock
);
1828 wakeup(&nmp
->nm_sockthd
);
1829 thread_terminate(thd
);
1832 /* start or wake a mount's socket thread */
1834 nfs_mount_sock_thread_wake(struct nfsmount
*nmp
)
1836 if (nmp
->nm_sockthd
)
1837 wakeup(&nmp
->nm_sockthd
);
1838 else if (kernel_thread_start(nfs_mount_sock_thread
, nmp
, &nmp
->nm_sockthd
) == KERN_SUCCESS
)
1839 thread_deallocate(nmp
->nm_sockthd
);
1843 * Check if we should mark the mount dead because the
1844 * unresponsive mount has reached the dead timeout.
1845 * (must be called with nmp locked)
1848 nfs_mount_check_dead_timeout(struct nfsmount
*nmp
)
1852 if (nmp
->nm_deadtimeout
<= 0)
1854 if (nmp
->nm_deadto_start
== 0)
1856 if (nmp
->nm_state
& NFSSTA_DEAD
)
1859 if ((now
.tv_sec
- nmp
->nm_deadto_start
) < nmp
->nm_deadtimeout
)
1861 printf("nfs server %s: dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1862 nmp
->nm_state
|= NFSSTA_DEAD
;
1863 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_DEAD
, 0);
1867 * NFS callback channel socket state
1869 struct nfs_callback_socket
1871 TAILQ_ENTRY(nfs_callback_socket
) ncbs_link
;
1872 socket_t ncbs_so
; /* the socket */
1873 struct sockaddr_storage ncbs_saddr
; /* socket address */
1874 struct nfs_rpc_record_state ncbs_rrs
; /* RPC record parsing state */
1875 time_t ncbs_stamp
; /* last accessed at */
1876 uint32_t ncbs_flags
; /* see below */
1878 #define NCBSOCK_UPCALL 0x0001
1879 #define NCBSOCK_UPCALLWANT 0x0002
1880 #define NCBSOCK_DEAD 0x0004
1883 * NFS callback channel state
1885 * One listening socket for accepting socket connections from servers and
1886 * a list of connected sockets to handle callback requests on.
1887 * Mounts registered with the callback channel are assigned IDs and
1888 * put on a list so that the callback request handling code can match
1889 * the requests up with mounts.
1891 socket_t nfs4_cb_so
= NULL
;
1892 socket_t nfs4_cb_so6
= NULL
;
1893 in_port_t nfs4_cb_port
= 0;
1894 in_port_t nfs4_cb_port6
= 0;
1895 uint32_t nfs4_cb_id
= 0;
1896 uint32_t nfs4_cb_so_usecount
= 0;
1897 TAILQ_HEAD(nfs4_cb_sock_list
,nfs_callback_socket
) nfs4_cb_socks
;
1898 TAILQ_HEAD(nfs4_cb_mount_list
,nfsmount
) nfs4_cb_mounts
;
1900 int nfs4_cb_handler(struct nfs_callback_socket
*, mbuf_t
);
1903 * Set up the callback channel for the NFS mount.
1905 * Initializes the callback channel socket state and
1906 * assigns a callback ID to the mount.
1909 nfs4_mount_callback_setup(struct nfsmount
*nmp
)
1911 struct sockaddr_in sin
;
1912 struct sockaddr_in6 sin6
;
1914 socket_t so6
= NULL
;
1915 struct timeval timeo
;
1919 lck_mtx_lock(nfs_global_mutex
);
1920 if (nfs4_cb_id
== 0) {
1921 TAILQ_INIT(&nfs4_cb_mounts
);
1922 TAILQ_INIT(&nfs4_cb_socks
);
1925 nmp
->nm_cbid
= nfs4_cb_id
++;
1926 if (nmp
->nm_cbid
== 0)
1927 nmp
->nm_cbid
= nfs4_cb_id
++;
1928 nfs4_cb_so_usecount
++;
1929 TAILQ_INSERT_HEAD(&nfs4_cb_mounts
, nmp
, nm_cblink
);
1932 lck_mtx_unlock(nfs_global_mutex
);
1937 error
= sock_socket(AF_INET
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so
);
1939 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv4 socket\n", error
);
1944 sock_setsockopt(so
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
1945 sin
.sin_len
= sizeof(struct sockaddr_in
);
1946 sin
.sin_family
= AF_INET
;
1947 sin
.sin_addr
.s_addr
= htonl(INADDR_ANY
);
1948 sin
.sin_port
= htons(nfs_callback_port
); /* try to use specified port */
1949 error
= sock_bind(so
, (struct sockaddr
*)&sin
);
1951 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv4 socket\n", error
);
1954 error
= sock_getsockname(so
, (struct sockaddr
*)&sin
, sin
.sin_len
);
1956 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv4 socket port\n", error
);
1959 nfs4_cb_port
= ntohs(sin
.sin_port
);
1961 error
= sock_listen(so
, 32);
1963 log(LOG_INFO
, "nfs callback setup: error %d on IPv4 listen\n", error
);
1967 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
1970 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
1972 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error
);
1973 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
1975 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error
);
1976 sock_setsockopt(so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
1977 sock_setsockopt(so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
1978 sock_setsockopt(so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
1982 error
= sock_socket(AF_INET6
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so6
);
1984 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv6 socket\n", error
);
1989 sock_setsockopt(so6
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
1990 sock_setsockopt(so6
, IPPROTO_IPV6
, IPV6_V6ONLY
, &on
, sizeof(on
));
1991 /* try to use specified port or same port as IPv4 */
1992 port
= nfs_callback_port
? nfs_callback_port
: nfs4_cb_port
;
1994 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
1995 sin6
.sin6_family
= AF_INET6
;
1996 sin6
.sin6_addr
= in6addr_any
;
1997 sin6
.sin6_port
= htons(port
);
1998 error
= sock_bind(so6
, (struct sockaddr
*)&sin6
);
2000 if (port
!= nfs_callback_port
) {
2001 /* if we simply tried to match the IPv4 port, then try any port */
2003 goto ipv6_bind_again
;
2005 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv6 socket\n", error
);
2008 error
= sock_getsockname(so6
, (struct sockaddr
*)&sin6
, sin6
.sin6_len
);
2010 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv6 socket port\n", error
);
2013 nfs4_cb_port6
= ntohs(sin6
.sin6_port
);
2015 error
= sock_listen(so6
, 32);
2017 log(LOG_INFO
, "nfs callback setup: error %d on IPv6 listen\n", error
);
2021 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2024 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2026 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error
);
2027 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2029 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error
);
2030 sock_setsockopt(so6
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2031 sock_setsockopt(so6
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2032 sock_setsockopt(so6
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2037 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2038 lck_mtx_unlock(nfs_global_mutex
);
2040 sock_shutdown(so
, SHUT_RDWR
);
2044 sock_shutdown(so6
, SHUT_RDWR
);
2048 lck_mtx_unlock(nfs_global_mutex
);
2053 * Shut down the callback channel for the NFS mount.
2055 * Clears the mount's callback ID and releases the mounts
2056 * reference on the callback socket. Last reference dropped
2057 * will also shut down the callback socket(s).
2060 nfs4_mount_callback_shutdown(struct nfsmount
*nmp
)
2062 struct nfs_callback_socket
*ncbsp
;
2064 struct nfs4_cb_sock_list cb_socks
;
2065 struct timespec ts
= {1,0};
2067 lck_mtx_lock(nfs_global_mutex
);
2068 TAILQ_REMOVE(&nfs4_cb_mounts
, nmp
, nm_cblink
);
2069 /* wait for any callbacks in progress to complete */
2070 while (nmp
->nm_cbrefs
)
2071 msleep(&nmp
->nm_cbrefs
, nfs_global_mutex
, PSOCK
, "cbshutwait", &ts
);
2073 if (--nfs4_cb_so_usecount
) {
2074 lck_mtx_unlock(nfs_global_mutex
);
2079 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2080 TAILQ_INIT(&cb_socks
);
2081 TAILQ_CONCAT(&cb_socks
, &nfs4_cb_socks
, ncbs_link
);
2082 lck_mtx_unlock(nfs_global_mutex
);
2084 sock_shutdown(so
, SHUT_RDWR
);
2088 sock_shutdown(so6
, SHUT_RDWR
);
2091 while ((ncbsp
= TAILQ_FIRST(&cb_socks
))) {
2092 TAILQ_REMOVE(&cb_socks
, ncbsp
, ncbs_link
);
2093 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2094 sock_close(ncbsp
->ncbs_so
);
2095 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2096 FREE(ncbsp
, M_TEMP
);
2101 * Check periodically for stale/unused nfs callback sockets
2103 #define NFS4_CB_TIMER_PERIOD 30
2104 #define NFS4_CB_IDLE_MAX 300
2106 nfs4_callback_timer(__unused
void *param0
, __unused
void *param1
)
2108 struct nfs_callback_socket
*ncbsp
, *nextncbsp
;
2112 lck_mtx_lock(nfs_global_mutex
);
2113 if (TAILQ_EMPTY(&nfs4_cb_socks
)) {
2114 nfs4_callback_timer_on
= 0;
2115 lck_mtx_unlock(nfs_global_mutex
);
2119 TAILQ_FOREACH_SAFE(ncbsp
, &nfs4_cb_socks
, ncbs_link
, nextncbsp
) {
2120 if (!(ncbsp
->ncbs_flags
& NCBSOCK_DEAD
) &&
2121 (now
.tv_sec
< (ncbsp
->ncbs_stamp
+ NFS4_CB_IDLE_MAX
)))
2123 TAILQ_REMOVE(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2124 lck_mtx_unlock(nfs_global_mutex
);
2125 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2126 sock_close(ncbsp
->ncbs_so
);
2127 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2128 FREE(ncbsp
, M_TEMP
);
2131 nfs4_callback_timer_on
= 1;
2132 nfs_interval_timer_start(nfs4_callback_timer_call
,
2133 NFS4_CB_TIMER_PERIOD
* 1000);
2134 lck_mtx_unlock(nfs_global_mutex
);
2138 * Accept a new callback socket.
2141 nfs4_cb_accept(socket_t so
, __unused
void *arg
, __unused
int waitflag
)
2143 socket_t newso
= NULL
;
2144 struct nfs_callback_socket
*ncbsp
;
2145 struct nfsmount
*nmp
;
2146 struct timeval timeo
, now
;
2147 int error
, on
= 1, ip
;
2149 if (so
== nfs4_cb_so
)
2151 else if (so
== nfs4_cb_so6
)
2156 /* allocate/initialize a new nfs_callback_socket */
2157 MALLOC(ncbsp
, struct nfs_callback_socket
*, sizeof(struct nfs_callback_socket
), M_TEMP
, M_WAITOK
);
2159 log(LOG_ERR
, "nfs callback accept: no memory for new socket\n");
2162 bzero(ncbsp
, sizeof(*ncbsp
));
2163 ncbsp
->ncbs_saddr
.ss_len
= (ip
== 4) ? sizeof(struct sockaddr_in
) : sizeof(struct sockaddr_in6
);
2164 nfs_rpc_record_state_init(&ncbsp
->ncbs_rrs
);
2166 /* accept a new socket */
2167 error
= sock_accept(so
, (struct sockaddr
*)&ncbsp
->ncbs_saddr
,
2168 ncbsp
->ncbs_saddr
.ss_len
, MSG_DONTWAIT
,
2169 nfs4_cb_rcv
, ncbsp
, &newso
);
2171 log(LOG_INFO
, "nfs callback accept: error %d accepting IPv%d socket\n", error
, ip
);
2172 FREE(ncbsp
, M_TEMP
);
2176 /* set up the new socket */
2177 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2180 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2182 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error
, ip
);
2183 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2185 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error
, ip
);
2186 sock_setsockopt(newso
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2187 sock_setsockopt(newso
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2188 sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2189 sock_setsockopt(newso
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2191 ncbsp
->ncbs_so
= newso
;
2193 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2195 lck_mtx_lock(nfs_global_mutex
);
2197 /* add it to the list */
2198 TAILQ_INSERT_HEAD(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2200 /* verify it's from a host we have mounted */
2201 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2202 /* check if socket's source address matches this mount's server address */
2205 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0)
2208 if (!nmp
) /* we don't want this socket, mark it dead */
2209 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2211 /* make sure the callback socket cleanup timer is running */
2212 /* (shorten the timer if we've got a socket we don't want) */
2213 if (!nfs4_callback_timer_on
) {
2214 nfs4_callback_timer_on
= 1;
2215 nfs_interval_timer_start(nfs4_callback_timer_call
,
2216 !nmp
? 500 : (NFS4_CB_TIMER_PERIOD
* 1000));
2217 } else if (!nmp
&& (nfs4_callback_timer_on
< 2)) {
2218 nfs4_callback_timer_on
= 2;
2219 thread_call_cancel(nfs4_callback_timer_call
);
2220 nfs_interval_timer_start(nfs4_callback_timer_call
, 500);
2223 lck_mtx_unlock(nfs_global_mutex
);
2227 * Receive mbufs from callback sockets into RPC records and process each record.
2228 * Detect connection has been closed and shut down.
2231 nfs4_cb_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
2233 struct nfs_callback_socket
*ncbsp
= arg
;
2234 struct timespec ts
= {1,0};
2237 int error
= 0, recv
= 1;
2239 lck_mtx_lock(nfs_global_mutex
);
2240 while (ncbsp
->ncbs_flags
& NCBSOCK_UPCALL
) {
2241 /* wait if upcall is already in progress */
2242 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALLWANT
;
2243 msleep(ncbsp
, nfs_global_mutex
, PSOCK
, "cbupcall", &ts
);
2245 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALL
;
2246 lck_mtx_unlock(nfs_global_mutex
);
2248 /* loop while we make error-free progress */
2249 while (!error
&& recv
) {
2250 error
= nfs_rpc_record_read(so
, &ncbsp
->ncbs_rrs
, MSG_DONTWAIT
, &recv
, &m
);
2251 if (m
) /* handle the request */
2252 error
= nfs4_cb_handler(ncbsp
, m
);
2255 /* note: no error and no data indicates server closed its end */
2256 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
2258 * Socket is either being closed or should be.
2259 * We can't close the socket in the context of the upcall.
2260 * So we mark it as dead and leave it for the cleanup timer to reap.
2262 ncbsp
->ncbs_stamp
= 0;
2263 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2266 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2269 lck_mtx_lock(nfs_global_mutex
);
2270 ncbsp
->ncbs_flags
&= ~NCBSOCK_UPCALL
;
2271 lck_mtx_unlock(nfs_global_mutex
);
2276 * Handle an NFS callback channel request.
2279 nfs4_cb_handler(struct nfs_callback_socket
*ncbsp
, mbuf_t mreq
)
2281 socket_t so
= ncbsp
->ncbs_so
;
2282 struct nfsm_chain nmreq
, nmrep
;
2283 mbuf_t mhead
= NULL
, mrest
= NULL
, m
;
2285 struct nfsmount
*nmp
;
2288 nfs_stateid stateid
;
2289 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], rbitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
, truncate
, attrbytes
;
2290 uint32_t val
, xid
, procnum
, taglen
, cbid
, numops
, op
, status
;
2291 uint32_t auth_type
, auth_len
;
2292 uint32_t numres
, *pnumres
;
2293 int error
= 0, replen
, len
;
2296 xid
= numops
= op
= status
= procnum
= taglen
= cbid
= 0;
2298 nfsm_chain_dissect_init(error
, &nmreq
, mreq
);
2299 nfsm_chain_get_32(error
, &nmreq
, xid
); // RPC XID
2300 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Call
2301 nfsm_assert(error
, (val
== RPC_CALL
), EBADRPC
);
2302 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Version
2303 nfsm_assert(error
, (val
== RPC_VER2
), ERPCMISMATCH
);
2304 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Program Number
2305 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG
), EPROGUNAVAIL
);
2306 nfsm_chain_get_32(error
, &nmreq
, val
); // NFS Callback Program Version Number
2307 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG_VERSION
), EPROGMISMATCH
);
2308 nfsm_chain_get_32(error
, &nmreq
, procnum
); // NFS Callback Procedure Number
2309 nfsm_assert(error
, (procnum
<= NFSPROC4_CB_COMPOUND
), EPROCUNAVAIL
);
2311 /* Handle authentication */
2312 /* XXX just ignore auth for now - handling kerberos may be tricky */
2313 nfsm_chain_get_32(error
, &nmreq
, auth_type
); // RPC Auth Flavor
2314 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // RPC Auth Length
2315 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2316 if (!error
&& (auth_len
> 0))
2317 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2318 nfsm_chain_adv(error
, &nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
2319 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // verifier length
2320 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2321 if (!error
&& (auth_len
> 0))
2322 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2330 case NFSPROC4_CB_NULL
:
2331 status
= NFSERR_RETVOID
;
2333 case NFSPROC4_CB_COMPOUND
:
2334 /* tag, minorversion, cb ident, numops, op array */
2335 nfsm_chain_get_32(error
, &nmreq
, taglen
); /* tag length */
2336 nfsm_assert(error
, (val
<= NFS4_OPAQUE_LIMIT
), EBADRPC
);
2338 /* start building the body of the response */
2339 nfsm_mbuf_get(error
, &mrest
, nfsm_rndup(taglen
) + 5*NFSX_UNSIGNED
);
2340 nfsm_chain_init(&nmrep
, mrest
);
2342 /* copy tag from request to response */
2343 nfsm_chain_add_32(error
, &nmrep
, taglen
); /* tag length */
2344 for (len
= (int)taglen
; !error
&& (len
> 0); len
-= NFSX_UNSIGNED
) {
2345 nfsm_chain_get_32(error
, &nmreq
, val
);
2346 nfsm_chain_add_32(error
, &nmrep
, val
);
2349 /* insert number of results placeholder */
2351 nfsm_chain_add_32(error
, &nmrep
, numres
);
2352 pnumres
= (uint32_t*)(nmrep
.nmc_ptr
- NFSX_UNSIGNED
);
2354 nfsm_chain_get_32(error
, &nmreq
, val
); /* minorversion */
2355 nfsm_assert(error
, (val
== 0), NFSERR_MINOR_VERS_MISMATCH
);
2356 nfsm_chain_get_32(error
, &nmreq
, cbid
); /* callback ID */
2357 nfsm_chain_get_32(error
, &nmreq
, numops
); /* number of operations */
2359 if ((error
== EBADRPC
) || (error
== NFSERR_MINOR_VERS_MISMATCH
))
2361 else if ((error
== ENOBUFS
) || (error
== ENOMEM
))
2362 status
= NFSERR_RESOURCE
;
2364 status
= NFSERR_SERVERFAULT
;
2366 nfsm_chain_null(&nmrep
);
2369 /* match the callback ID to a registered mount */
2370 lck_mtx_lock(nfs_global_mutex
);
2371 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2372 if (nmp
->nm_cbid
!= cbid
)
2374 /* verify socket's source address matches this mount's server address */
2377 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0)
2380 /* mark the NFS mount as busy */
2383 lck_mtx_unlock(nfs_global_mutex
);
2385 /* if no mount match, just drop socket. */
2387 nfsm_chain_null(&nmrep
);
2391 /* process ops, adding results to mrest */
2392 while (numops
> 0) {
2394 nfsm_chain_get_32(error
, &nmreq
, op
);
2398 case NFS_OP_CB_GETATTR
:
2399 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2401 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2402 bmlen
= NFS_ATTR_BITMAP_LEN
;
2403 nfsm_chain_get_bitmap(error
, &nmreq
, bitmap
, bmlen
);
2407 numops
= 0; /* don't process any more ops */
2409 /* find the node for the file handle */
2410 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2412 status
= NFSERR_BADHANDLE
;
2415 numops
= 0; /* don't process any more ops */
2418 nfsm_chain_add_32(error
, &nmrep
, op
);
2419 nfsm_chain_add_32(error
, &nmrep
, status
);
2420 if (!error
&& (status
== EBADRPC
))
2423 /* only allow returning size, change, and mtime attrs */
2424 NFS_CLEAR_ATTRIBUTES(&rbitmap
);
2426 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
2427 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_CHANGE
);
2428 attrbytes
+= 2 * NFSX_UNSIGNED
;
2430 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
2431 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_SIZE
);
2432 attrbytes
+= 2 * NFSX_UNSIGNED
;
2434 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2435 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_TIME_MODIFY
);
2436 attrbytes
+= 3 * NFSX_UNSIGNED
;
2438 nfsm_chain_add_bitmap(error
, &nmrep
, rbitmap
, NFS_ATTR_BITMAP_LEN
);
2439 nfsm_chain_add_32(error
, &nmrep
, attrbytes
);
2440 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
))
2441 nfsm_chain_add_64(error
, &nmrep
,
2442 np
->n_vattr
.nva_change
+ ((np
->n_flag
& NMODIFIED
) ? 1 : 0));
2443 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
))
2444 nfsm_chain_add_64(error
, &nmrep
, np
->n_size
);
2445 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2446 nfsm_chain_add_64(error
, &nmrep
, np
->n_vattr
.nva_timesec
[NFSTIME_MODIFY
]);
2447 nfsm_chain_add_32(error
, &nmrep
, np
->n_vattr
.nva_timensec
[NFSTIME_MODIFY
]);
2449 nfs_node_unlock(np
);
2450 vnode_put(NFSTOV(np
));
2454 * If we hit an error building the reply, we can't easily back up.
2455 * So we'll just update the status and hope the server ignores the
2459 case NFS_OP_CB_RECALL
:
2460 // (STATEID, TRUNCATE, FH) -> (STATUS)
2462 nfsm_chain_get_stateid(error
, &nmreq
, &stateid
);
2463 nfsm_chain_get_32(error
, &nmreq
, truncate
);
2464 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, &fh
);
2468 numops
= 0; /* don't process any more ops */
2470 /* find the node for the file handle */
2471 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
.fh_data
, fh
.fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2473 status
= NFSERR_BADHANDLE
;
2476 numops
= 0; /* don't process any more ops */
2477 } else if (!(np
->n_openflags
& N_DELEG_MASK
) ||
2478 bcmp(&np
->n_dstateid
, &stateid
, sizeof(stateid
))) {
2479 /* delegation stateid state doesn't match */
2480 status
= NFSERR_BAD_STATEID
;
2481 numops
= 0; /* don't process any more ops */
2483 if (!status
) /* add node to recall queue, and wake socket thread */
2484 nfs4_delegation_return_enqueue(np
);
2486 nfs_node_unlock(np
);
2487 vnode_put(NFSTOV(np
));
2490 nfsm_chain_add_32(error
, &nmrep
, op
);
2491 nfsm_chain_add_32(error
, &nmrep
, status
);
2492 if (!error
&& (status
== EBADRPC
))
2495 case NFS_OP_CB_ILLEGAL
:
2497 nfsm_chain_add_32(error
, &nmrep
, NFS_OP_CB_ILLEGAL
);
2498 status
= NFSERR_OP_ILLEGAL
;
2499 nfsm_chain_add_32(error
, &nmrep
, status
);
2500 numops
= 0; /* don't process any more ops */
2506 if (!status
&& error
) {
2507 if (error
== EBADRPC
)
2509 else if ((error
== ENOBUFS
) || (error
== ENOMEM
))
2510 status
= NFSERR_RESOURCE
;
2512 status
= NFSERR_SERVERFAULT
;
2516 /* Now, set the numres field */
2517 *pnumres
= txdr_unsigned(numres
);
2518 nfsm_chain_build_done(error
, &nmrep
);
2519 nfsm_chain_null(&nmrep
);
2521 /* drop the callback reference on the mount */
2522 lck_mtx_lock(nfs_global_mutex
);
2525 wakeup(&nmp
->nm_cbrefs
);
2526 lck_mtx_unlock(nfs_global_mutex
);
2531 if (status
== EBADRPC
)
2532 OSAddAtomic(1, &nfsstats
.rpcinvalid
);
2534 /* build reply header */
2535 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mhead
);
2536 nfsm_chain_init(&nmrep
, mhead
);
2537 nfsm_chain_add_32(error
, &nmrep
, 0); /* insert space for an RPC record mark */
2538 nfsm_chain_add_32(error
, &nmrep
, xid
);
2539 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
2540 if ((status
== ERPCMISMATCH
) || (status
& NFSERR_AUTHERR
)) {
2541 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
2542 if (status
& NFSERR_AUTHERR
) {
2543 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
2544 nfsm_chain_add_32(error
, &nmrep
, (status
& ~NFSERR_AUTHERR
));
2546 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
2547 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2548 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
2552 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
2553 /* XXX RPCAUTH_NULL verifier */
2554 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
2555 nfsm_chain_add_32(error
, &nmrep
, 0);
2556 /* accepted status */
2559 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
2562 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
2563 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2564 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
2567 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
2570 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
2573 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
2574 if (status
!= NFSERR_RETVOID
)
2575 nfsm_chain_add_32(error
, &nmrep
, status
);
2579 nfsm_chain_build_done(error
, &nmrep
);
2581 nfsm_chain_null(&nmrep
);
2584 error
= mbuf_setnext(nmrep
.nmc_mcur
, mrest
);
2586 printf("nfs cb: mbuf_setnext failed %d\n", error
);
2590 /* Calculate the size of the reply */
2592 for (m
= nmrep
.nmc_mhead
; m
; m
= mbuf_next(m
))
2593 replen
+= mbuf_len(m
);
2594 mbuf_pkthdr_setlen(mhead
, replen
);
2595 error
= mbuf_pkthdr_setrcvif(mhead
, NULL
);
2596 nfsm_chain_set_recmark(error
, &nmrep
, (replen
- NFSX_UNSIGNED
) | 0x80000000);
2597 nfsm_chain_null(&nmrep
);
2599 /* send the reply */
2600 bzero(&msg
, sizeof(msg
));
2601 error
= sock_sendmbuf(so
, &msg
, mhead
, 0, &sentlen
);
2603 if (!error
&& ((int)sentlen
!= replen
))
2604 error
= EWOULDBLOCK
;
2605 if (error
== EWOULDBLOCK
) /* inability to send response is considered fatal */
2609 nfsm_chain_cleanup(&nmrep
);
2621 * Initialize an nfs_rpc_record_state structure.
2624 nfs_rpc_record_state_init(struct nfs_rpc_record_state
*nrrsp
)
2626 bzero(nrrsp
, sizeof(*nrrsp
));
2627 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
2631 * Clean up an nfs_rpc_record_state structure.
2634 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state
*nrrsp
)
2636 if (nrrsp
->nrrs_m
) {
2637 mbuf_freem(nrrsp
->nrrs_m
);
2638 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
2643 * Read the next (marked) RPC record from the socket.
2645 * *recvp returns if any data was received.
2646 * *mp returns the next complete RPC record
2649 nfs_rpc_record_read(socket_t so
, struct nfs_rpc_record_state
*nrrsp
, int flags
, int *recvp
, mbuf_t
*mp
)
2660 /* read the TCP RPC record marker */
2661 while (!error
&& nrrsp
->nrrs_markerleft
) {
2662 aio
.iov_base
= ((char*)&nrrsp
->nrrs_fragleft
+
2663 sizeof(nrrsp
->nrrs_fragleft
) - nrrsp
->nrrs_markerleft
);
2664 aio
.iov_len
= nrrsp
->nrrs_markerleft
;
2665 bzero(&msg
, sizeof(msg
));
2668 error
= sock_receive(so
, &msg
, flags
, &rcvlen
);
2669 if (error
|| !rcvlen
)
2672 nrrsp
->nrrs_markerleft
-= rcvlen
;
2673 if (nrrsp
->nrrs_markerleft
)
2675 /* record marker complete */
2676 nrrsp
->nrrs_fragleft
= ntohl(nrrsp
->nrrs_fragleft
);
2677 if (nrrsp
->nrrs_fragleft
& 0x80000000) {
2678 nrrsp
->nrrs_lastfrag
= 1;
2679 nrrsp
->nrrs_fragleft
&= ~0x80000000;
2681 nrrsp
->nrrs_reclen
+= nrrsp
->nrrs_fragleft
;
2682 if (nrrsp
->nrrs_reclen
> NFS_MAXPACKET
) {
2683 /* This is SERIOUS! We are out of sync with the sender. */
2684 log(LOG_ERR
, "impossible RPC record length (%d) on callback", nrrsp
->nrrs_reclen
);
2689 /* read the TCP RPC record fragment */
2690 while (!error
&& !nrrsp
->nrrs_markerleft
&& nrrsp
->nrrs_fragleft
) {
2692 rcvlen
= nrrsp
->nrrs_fragleft
;
2693 error
= sock_receivembuf(so
, NULL
, &m
, flags
, &rcvlen
);
2694 if (error
|| !rcvlen
|| !m
)
2697 /* append mbufs to list */
2698 nrrsp
->nrrs_fragleft
-= rcvlen
;
2699 if (!nrrsp
->nrrs_m
) {
2702 error
= mbuf_setnext(nrrsp
->nrrs_mlast
, m
);
2704 printf("nfs tcp rcv: mbuf_setnext failed %d\n", error
);
2709 while (mbuf_next(m
))
2711 nrrsp
->nrrs_mlast
= m
;
2714 /* done reading fragment? */
2715 if (!error
&& !nrrsp
->nrrs_markerleft
&& !nrrsp
->nrrs_fragleft
) {
2716 /* reset socket fragment parsing state */
2717 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
2718 if (nrrsp
->nrrs_lastfrag
) {
2719 /* RPC record complete */
2720 *mp
= nrrsp
->nrrs_m
;
2721 /* reset socket record parsing state */
2722 nrrsp
->nrrs_reclen
= 0;
2723 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
2724 nrrsp
->nrrs_lastfrag
= 0;
2734 * The NFS client send routine.
2736 * Send the given NFS request out the mount's socket.
2737 * Holds nfs_sndlock() for the duration of this call.
2739 * - check for request termination (sigintr)
2740 * - wait for reconnect, if necessary
2741 * - UDP: check the congestion window
2742 * - make a copy of the request to send
2743 * - UDP: update the congestion window
2744 * - send the request
2746 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
2747 * rexmit count is also updated if this isn't the first send.
2749 * If the send is not successful, make sure R_MUSTRESEND is set.
2750 * If this wasn't the first transmit, set R_RESENDERR.
2751 * Also, undo any UDP congestion window changes made.
2753 * If the error appears to indicate that the socket should
2754 * be reconnected, mark the socket for reconnection.
2756 * Only return errors when the request should be aborted.
2759 nfs_send(struct nfsreq
*req
, int wait
)
2761 struct nfsmount
*nmp
;
2762 struct nfs_socket
*nso
;
2763 int error
, error2
, sotype
, rexmit
, slpflag
= 0, needrecon
;
2765 struct sockaddr
*sendnam
;
2768 struct timespec ts
= { 2, 0 };
2771 error
= nfs_sndlock(req
);
2773 lck_mtx_lock(&req
->r_mtx
);
2774 req
->r_error
= error
;
2775 req
->r_flags
&= ~R_SENDING
;
2776 lck_mtx_unlock(&req
->r_mtx
);
2780 error
= nfs_sigintr(req
->r_nmp
, req
, NULL
, 0);
2783 lck_mtx_lock(&req
->r_mtx
);
2784 req
->r_error
= error
;
2785 req
->r_flags
&= ~R_SENDING
;
2786 lck_mtx_unlock(&req
->r_mtx
);
2790 sotype
= nmp
->nm_sotype
;
2793 * If it's a setup RPC but we're not in SETUP... must need reconnect.
2794 * If it's a recovery RPC but the socket's not ready... must need reconnect.
2796 if (((req
->r_flags
& R_SETUP
) && !(nmp
->nm_sockflags
& NMSOCK_SETUP
)) ||
2797 ((req
->r_flags
& R_RECOVER
) && !(nmp
->nm_sockflags
& NMSOCK_READY
))) {
2800 lck_mtx_lock(&req
->r_mtx
);
2801 req
->r_error
= error
;
2802 req
->r_flags
&= ~R_SENDING
;
2803 lck_mtx_unlock(&req
->r_mtx
);
2807 /* If the socket needs reconnection, do that now. */
2808 /* wait until socket is ready - unless this request is part of setup */
2809 lck_mtx_lock(&nmp
->nm_lock
);
2810 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) &&
2811 !((nmp
->nm_sockflags
& NMSOCK_SETUP
) && (req
->r_flags
& R_SETUP
))) {
2812 if (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
))
2814 lck_mtx_unlock(&nmp
->nm_lock
);
2817 lck_mtx_lock(&req
->r_mtx
);
2818 req
->r_flags
&= ~R_SENDING
;
2819 req
->r_flags
|= R_MUSTRESEND
;
2821 lck_mtx_unlock(&req
->r_mtx
);
2824 NFS_SOCK_DBG(("nfs_send: 0x%llx wait reconnect\n", req
->r_xid
));
2825 lck_mtx_lock(&req
->r_mtx
);
2826 req
->r_flags
&= ~R_MUSTRESEND
;
2828 lck_mtx_unlock(&req
->r_mtx
);
2829 lck_mtx_lock(&nmp
->nm_lock
);
2830 while (!(nmp
->nm_sockflags
& NMSOCK_READY
)) {
2831 /* don't bother waiting if the socket thread won't be reconnecting it */
2832 if (nmp
->nm_state
& NFSSTA_FORCE
) {
2836 if (NMFLAG(nmp
, SOFT
) && (nmp
->nm_reconnect_start
> 0)) {
2839 if ((now
.tv_sec
- nmp
->nm_reconnect_start
) >= 8) {
2840 /* soft mount in reconnect for a while... terminate ASAP */
2841 OSAddAtomic(1, &nfsstats
.rpctimeouts
);
2842 req
->r_flags
|= R_SOFTTERM
;
2843 req
->r_error
= error
= ETIMEDOUT
;
2847 /* make sure socket thread is running, then wait */
2848 nfs_mount_sock_thread_wake(nmp
);
2849 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1)))
2851 msleep(req
, &nmp
->nm_lock
, slpflag
|PSOCK
, "nfsconnectwait", &ts
);
2854 lck_mtx_unlock(&nmp
->nm_lock
);
2856 lck_mtx_lock(&req
->r_mtx
);
2857 req
->r_error
= error
;
2858 req
->r_flags
&= ~R_SENDING
;
2859 lck_mtx_unlock(&req
->r_mtx
);
2865 /* note that we're using the mount's socket to do the send */
2866 nmp
->nm_state
|= NFSSTA_SENDING
; /* will be cleared by nfs_sndunlock() */
2867 lck_mtx_unlock(&nmp
->nm_lock
);
2870 lck_mtx_lock(&req
->r_mtx
);
2871 req
->r_flags
&= ~R_SENDING
;
2872 req
->r_flags
|= R_MUSTRESEND
;
2874 lck_mtx_unlock(&req
->r_mtx
);
2878 lck_mtx_lock(&req
->r_mtx
);
2879 rexmit
= (req
->r_flags
& R_SENT
);
2881 if (sotype
== SOCK_DGRAM
) {
2882 lck_mtx_lock(&nmp
->nm_lock
);
2883 if (!(req
->r_flags
& R_CWND
) && (nmp
->nm_sent
>= nmp
->nm_cwnd
)) {
2884 /* if we can't send this out yet, wait on the cwnd queue */
2885 slpflag
= (NMFLAG(nmp
, INTR
) && req
->r_thread
) ? PCATCH
: 0;
2886 lck_mtx_unlock(&nmp
->nm_lock
);
2888 req
->r_flags
&= ~R_SENDING
;
2889 req
->r_flags
|= R_MUSTRESEND
;
2890 lck_mtx_unlock(&req
->r_mtx
);
2895 lck_mtx_lock(&nmp
->nm_lock
);
2896 while (nmp
->nm_sent
>= nmp
->nm_cwnd
) {
2897 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1)))
2899 TAILQ_INSERT_TAIL(&nmp
->nm_cwndq
, req
, r_cchain
);
2900 msleep(req
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfswaitcwnd", &ts
);
2902 if ((req
->r_cchain
.tqe_next
!= NFSREQNOLIST
)) {
2903 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
2904 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
2907 lck_mtx_unlock(&nmp
->nm_lock
);
2911 * We update these *before* the send to avoid racing
2912 * against others who may be looking to send requests.
2915 /* first transmit */
2916 req
->r_flags
|= R_CWND
;
2917 nmp
->nm_sent
+= NFS_CWNDSCALE
;
2920 * When retransmitting, turn timing off
2921 * and divide congestion window by 2.
2923 req
->r_flags
&= ~R_TIMING
;
2925 if (nmp
->nm_cwnd
< NFS_CWNDSCALE
)
2926 nmp
->nm_cwnd
= NFS_CWNDSCALE
;
2928 lck_mtx_unlock(&nmp
->nm_lock
);
2931 req
->r_flags
&= ~R_MUSTRESEND
;
2932 lck_mtx_unlock(&req
->r_mtx
);
2934 error
= mbuf_copym(req
->r_mhead
, 0, MBUF_COPYALL
,
2935 wait
? MBUF_WAITOK
: MBUF_DONTWAIT
, &mreqcopy
);
2938 log(LOG_INFO
, "nfs_send: mbuf copy failed %d\n", error
);
2940 lck_mtx_lock(&req
->r_mtx
);
2941 req
->r_flags
&= ~R_SENDING
;
2942 req
->r_flags
|= R_MUSTRESEND
;
2944 lck_mtx_unlock(&req
->r_mtx
);
2948 bzero(&msg
, sizeof(msg
));
2949 if ((sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
) && ((sendnam
= nmp
->nm_saddr
))) {
2950 msg
.msg_name
= (caddr_t
)sendnam
;
2951 msg
.msg_namelen
= sendnam
->sa_len
;
2953 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreqcopy
, 0, &sentlen
);
2954 #ifdef NFS_SOCKET_DEBUGGING
2955 if (error
|| (sentlen
!= req
->r_mreqlen
))
2956 NFS_SOCK_DBG(("nfs_send: 0x%llx sent %d/%d error %d\n",
2957 req
->r_xid
, (int)sentlen
, (int)req
->r_mreqlen
, error
));
2959 if (!error
&& (sentlen
!= req
->r_mreqlen
))
2960 error
= EWOULDBLOCK
;
2961 needrecon
= ((sotype
== SOCK_STREAM
) && sentlen
&& (sentlen
!= req
->r_mreqlen
));
2963 lck_mtx_lock(&req
->r_mtx
);
2964 req
->r_flags
&= ~R_SENDING
;
2966 if (rexmit
&& (++req
->r_rexmit
> NFS_MAXREXMIT
))
2967 req
->r_rexmit
= NFS_MAXREXMIT
;
2971 req
->r_flags
&= ~R_RESENDERR
;
2973 OSAddAtomic(1, &nfsstats
.rpcretries
);
2974 req
->r_flags
|= R_SENT
;
2975 if (req
->r_flags
& R_WAITSENT
) {
2976 req
->r_flags
&= ~R_WAITSENT
;
2980 lck_mtx_unlock(&req
->r_mtx
);
2985 req
->r_flags
|= R_MUSTRESEND
;
2987 req
->r_flags
|= R_RESENDERR
;
2988 if ((error
== EINTR
) || (error
== ERESTART
))
2989 req
->r_error
= error
;
2990 lck_mtx_unlock(&req
->r_mtx
);
2992 if (sotype
== SOCK_DGRAM
) {
2994 * Note: even though a first send may fail, we consider
2995 * the request sent for congestion window purposes.
2996 * So we don't need to undo any of the changes made above.
2999 * Socket errors ignored for connectionless sockets??
3000 * For now, ignore them all
3002 if ((error
!= EINTR
) && (error
!= ERESTART
) &&
3003 (error
!= EWOULDBLOCK
) && (error
!= EIO
) && (nso
== nmp
->nm_nso
)) {
3004 int clearerror
= 0, optlen
= sizeof(clearerror
);
3005 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &clearerror
, &optlen
);
3006 #ifdef NFS_SOCKET_DEBUGGING
3008 NFS_SOCK_DBG(("nfs_send: ignoring UDP socket error %d so %d\n",
3009 error
, clearerror
));
3014 /* check if it appears we should reconnect the socket */
3017 /* if send timed out, reconnect if on TCP */
3018 if (sotype
!= SOCK_STREAM
)
3035 if (needrecon
&& (nso
== nmp
->nm_nso
)) { /* mark socket as needing reconnect */
3036 NFS_SOCK_DBG(("nfs_send: 0x%llx need reconnect %d\n", req
->r_xid
, error
));
3037 nfs_need_reconnect(nmp
);
3043 * Don't log some errors:
3044 * EPIPE errors may be common with servers that drop idle connections.
3045 * EADDRNOTAVAIL may occur on network transitions.
3046 * ENOTCONN may occur under some network conditions.
3048 if ((error
== EPIPE
) || (error
== EADDRNOTAVAIL
) || (error
== ENOTCONN
))
3050 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
))
3051 log(LOG_INFO
, "nfs send error %d for server %s\n", error
,
3052 !req
->r_nmp
? "<unmounted>" :
3053 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
);
3055 /* prefer request termination error over other errors */
3056 error2
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0);
3060 /* only allow the following errors to be returned */
3061 if ((error
!= EINTR
) && (error
!= ERESTART
) && (error
!= EIO
) &&
3062 (error
!= ENXIO
) && (error
!= ETIMEDOUT
))
3068 * NFS client socket upcalls
3070 * Pull RPC replies out of an NFS mount's socket and match them
3071 * up with the pending request.
3073 * The datagram code is simple because we always get whole
3074 * messages out of the socket.
3076 * The stream code is more involved because we have to parse
3077 * the RPC records out of the stream.
3080 /* NFS client UDP socket upcall */
3082 nfs_udp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3084 struct nfsmount
*nmp
= arg
;
3085 struct nfs_socket
*nso
= nmp
->nm_nso
;
3090 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
)
3094 /* make sure we're on the current socket */
3095 if (!nso
|| (nso
->nso_so
!= so
))
3100 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
3102 nfs_request_match_reply(nmp
, m
);
3103 } while (m
&& !error
);
3105 if (error
&& (error
!= EWOULDBLOCK
)) {
3106 /* problems with the socket... mark for reconnection */
3107 NFS_SOCK_DBG(("nfs_udp_rcv: need reconnect %d\n", error
));
3108 nfs_need_reconnect(nmp
);
3112 /* NFS client TCP socket upcall */
3114 nfs_tcp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3116 struct nfsmount
*nmp
= arg
;
3117 struct nfs_socket
*nso
= nmp
->nm_nso
;
3118 struct nfs_rpc_record_state nrrs
;
3123 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
)
3126 /* make sure we're on the current socket */
3127 lck_mtx_lock(&nmp
->nm_lock
);
3129 if (!nso
|| (nso
->nso_so
!= so
) || (nmp
->nm_sockflags
& (NMSOCK_DISCONNECTING
))) {
3130 lck_mtx_unlock(&nmp
->nm_lock
);
3133 lck_mtx_unlock(&nmp
->nm_lock
);
3135 /* make sure this upcall should be trying to do work */
3136 lck_mtx_lock(&nso
->nso_lock
);
3137 if (nso
->nso_flags
& (NSO_UPCALL
|NSO_DISCONNECTING
|NSO_DEAD
)) {
3138 lck_mtx_unlock(&nso
->nso_lock
);
3141 nso
->nso_flags
|= NSO_UPCALL
;
3142 nrrs
= nso
->nso_rrs
;
3143 lck_mtx_unlock(&nso
->nso_lock
);
3145 /* loop while we make error-free progress */
3146 while (!error
&& recv
) {
3147 error
= nfs_rpc_record_read(so
, &nrrs
, MSG_DONTWAIT
, &recv
, &m
);
3148 if (m
) /* match completed response with request */
3149 nfs_request_match_reply(nmp
, m
);
3152 lck_mtx_lock(&nmp
->nm_lock
);
3153 if (nmp
->nm_nso
== nso
) {
3154 /* still the same socket, so update socket's RPC parsing state */
3155 lck_mtx_unlock(&nmp
->nm_lock
);
3156 lck_mtx_lock(&nso
->nso_lock
);
3157 nso
->nso_rrs
= nrrs
;
3158 nso
->nso_flags
&= ~NSO_UPCALL
;
3159 lck_mtx_unlock(&nso
->nso_lock
);
3160 if (nmp
->nm_sockflags
& NMSOCK_DISCONNECTING
)
3161 wakeup(&nmp
->nm_sockflags
);
3163 lck_mtx_unlock(&nmp
->nm_lock
);
3165 #ifdef NFS_SOCKET_DEBUGGING
3166 if (!recv
&& (error
!= EWOULDBLOCK
))
3167 NFS_SOCK_DBG(("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error
));
3169 /* note: no error and no data indicates server closed its end */
3170 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
3171 /* problems with the socket... mark for reconnection */
3172 NFS_SOCK_DBG(("nfs_tcp_rcv: need reconnect %d\n", error
));
3173 nfs_need_reconnect(nmp
);
3178 * "poke" a socket to try to provoke any pending errors
3181 nfs_sock_poke(struct nfsmount
*nmp
)
3189 lck_mtx_lock(&nmp
->nm_lock
);
3190 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) ||
3191 !(nmp
->nm_sockflags
& NMSOCK_READY
) || !nmp
->nm_nso
|| !nmp
->nm_nso
->nso_so
) {
3192 lck_mtx_unlock(&nmp
->nm_lock
);
3195 lck_mtx_unlock(&nmp
->nm_lock
);
3196 aio
.iov_base
= &dummy
;
3199 bzero(&msg
, sizeof(msg
));
3202 error
= sock_send(nmp
->nm_nso
->nso_so
, &msg
, MSG_DONTWAIT
, &len
);
3203 NFS_SOCK_DBG(("nfs_sock_poke: error %d\n", error
));
3207 * Match an RPC reply with the corresponding request
3210 nfs_request_match_reply(struct nfsmount
*nmp
, mbuf_t mrep
)
3213 struct nfsm_chain nmrep
;
3214 u_int32_t reply
= 0, rxid
= 0;
3215 int error
= 0, asyncioq
, t1
;
3217 /* Get the xid and check that it is an rpc reply */
3218 nfsm_chain_dissect_init(error
, &nmrep
, mrep
);
3219 nfsm_chain_get_32(error
, &nmrep
, rxid
);
3220 nfsm_chain_get_32(error
, &nmrep
, reply
);
3221 if (error
|| (reply
!= RPC_REPLY
)) {
3222 OSAddAtomic(1, &nfsstats
.rpcinvalid
);
3228 * Loop through the request list to match up the reply
3229 * Iff no match, just drop it.
3231 lck_mtx_lock(nfs_request_mutex
);
3232 TAILQ_FOREACH(req
, &nfs_reqq
, r_chain
) {
3233 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
)))
3235 /* looks like we have it, grab lock and double check */
3236 lck_mtx_lock(&req
->r_mtx
);
3237 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3238 lck_mtx_unlock(&req
->r_mtx
);
3242 req
->r_nmrep
= nmrep
;
3243 lck_mtx_lock(&nmp
->nm_lock
);
3244 if (nmp
->nm_sotype
== SOCK_DGRAM
) {
3246 * Update congestion window.
3247 * Do the additive increase of one rpc/rtt.
3249 FSDBG(530, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3250 if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
3252 ((NFS_CWNDSCALE
* NFS_CWNDSCALE
) +
3253 (nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
3254 if (nmp
->nm_cwnd
> NFS_MAXCWND
)
3255 nmp
->nm_cwnd
= NFS_MAXCWND
;
3257 if (req
->r_flags
& R_CWND
) {
3258 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3259 req
->r_flags
&= ~R_CWND
;
3261 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3262 /* congestion window is open, poke the cwnd queue */
3263 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3264 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3265 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3270 * Update rtt using a gain of 0.125 on the mean
3271 * and a gain of 0.25 on the deviation.
3273 if (req
->r_flags
& R_TIMING
) {
3275 * Since the timer resolution of
3276 * NFS_HZ is so course, it can often
3277 * result in r_rtt == 0. Since
3278 * r_rtt == N means that the actual
3279 * rtt is between N+dt and N+2-dt ticks,
3282 if (proct
[req
->r_procnum
] == 0)
3283 panic("nfs_request_match_reply: proct[%d] is zero", req
->r_procnum
);
3284 t1
= req
->r_rtt
+ 1;
3285 t1
-= (NFS_SRTT(req
) >> 3);
3286 NFS_SRTT(req
) += t1
;
3289 t1
-= (NFS_SDRTT(req
) >> 2);
3290 NFS_SDRTT(req
) += t1
;
3292 nmp
->nm_timeouts
= 0;
3293 lck_mtx_unlock(&nmp
->nm_lock
);
3294 /* signal anyone waiting on this request */
3296 asyncioq
= (req
->r_callback
.rcb_func
!= NULL
);
3297 if (nfs_request_using_gss(req
))
3298 nfs_gss_clnt_rpcdone(req
);
3299 lck_mtx_unlock(&req
->r_mtx
);
3300 lck_mtx_unlock(nfs_request_mutex
);
3301 /* if it's an async RPC with a callback, queue it up */
3303 nfs_asyncio_finish(req
);
3308 /* not matched to a request, so drop it. */
3309 lck_mtx_unlock(nfs_request_mutex
);
3310 OSAddAtomic(1, &nfsstats
.rpcunexpected
);
3316 * Wait for the reply for a given request...
3317 * ...potentially resending the request if necessary.
3320 nfs_wait_reply(struct nfsreq
*req
)
3322 struct timespec ts
= { 2, 0 };
3323 int error
= 0, slpflag
, first
= 1;
3325 if (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
))
3330 lck_mtx_lock(&req
->r_mtx
);
3331 while (!req
->r_nmrep
.nmc_mhead
) {
3332 if ((error
= nfs_sigintr(req
->r_nmp
, req
, first
? NULL
: req
->r_thread
, 0)))
3334 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
)
3336 /* check if we need to resend */
3337 if (req
->r_flags
& R_MUSTRESEND
) {
3338 NFS_SOCK_DBG(("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3339 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
));
3340 req
->r_flags
|= R_SENDING
;
3341 lck_mtx_unlock(&req
->r_mtx
);
3342 if (nfs_request_using_gss(req
)) {
3344 * It's an RPCSEC_GSS request.
3345 * Can't just resend the original request
3346 * without bumping the cred sequence number.
3347 * Go back and re-build the request.
3349 lck_mtx_lock(&req
->r_mtx
);
3350 req
->r_flags
&= ~R_SENDING
;
3351 lck_mtx_unlock(&req
->r_mtx
);
3354 error
= nfs_send(req
, 1);
3355 lck_mtx_lock(&req
->r_mtx
);
3356 NFS_SOCK_DBG(("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3357 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
, error
));
3360 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
)
3363 /* need to poll if we're P_NOREMOTEHANG */
3364 if (nfs_noremotehang(req
->r_thread
))
3366 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitreply", &ts
);
3367 first
= slpflag
= 0;
3369 lck_mtx_unlock(&req
->r_mtx
);
3375 * An NFS request goes something like this:
3376 * (nb: always frees up mreq mbuf list)
3377 * nfs_request_create()
3378 * - allocates a request struct if one is not provided
3379 * - initial fill-in of the request struct
3380 * nfs_request_add_header()
3381 * - add the RPC header
3382 * nfs_request_send()
3383 * - link it into list
3384 * - call nfs_send() for first transmit
3385 * nfs_request_wait()
3386 * - call nfs_wait_reply() to wait for the reply
3387 * nfs_request_finish()
3388 * - break down rpc header and return with error or nfs reply
3389 * pointed to by nmrep.
3390 * nfs_request_rele()
3391 * nfs_request_destroy()
3392 * - clean up the request struct
3393 * - free the request struct if it was allocated by nfs_request_create()
3397 * Set up an NFS request struct (allocating if no request passed in).
3402 mount_t mp
, /* used only if !np */
3403 struct nfsm_chain
*nmrest
,
3407 struct nfsreq
**reqp
)
3409 struct nfsreq
*req
, *newreq
= NULL
;
3410 struct nfsmount
*nmp
;
3414 /* allocate a new NFS request structure */
3415 MALLOC_ZONE(newreq
, struct nfsreq
*, sizeof(*newreq
), M_NFSREQ
, M_WAITOK
);
3417 mbuf_freem(nmrest
->nmc_mhead
);
3418 nmrest
->nmc_mhead
= NULL
;
3424 bzero(req
, sizeof(*req
));
3426 req
->r_flags
= R_ALLOCATED
;
3428 nmp
= VFSTONFS(np
? NFSTOMP(np
) : mp
);
3431 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3434 lck_mtx_lock(&nmp
->nm_lock
);
3435 if ((nmp
->nm_state
& (NFSSTA_FORCE
|NFSSTA_TIMEO
)) ==
3436 (NFSSTA_FORCE
|NFSSTA_TIMEO
)) {
3437 lck_mtx_unlock(&nmp
->nm_lock
);
3438 mbuf_freem(nmrest
->nmc_mhead
);
3439 nmrest
->nmc_mhead
= NULL
;
3441 FREE_ZONE(newreq
, sizeof(*newreq
), M_NFSREQ
);
3445 if ((nmp
->nm_vers
!= NFS_VER4
) && (procnum
>= 0) && (procnum
< NFS_NPROCS
))
3446 OSAddAtomic(1, &nfsstats
.rpccnt
[procnum
]);
3447 if ((nmp
->nm_vers
== NFS_VER4
) && (procnum
!= NFSPROC4_COMPOUND
) && (procnum
!= NFSPROC4_NULL
))
3448 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum
);
3450 lck_mtx_init(&req
->r_mtx
, nfs_request_grp
, LCK_ATTR_NULL
);
3453 req
->r_thread
= thd
;
3455 req
->r_flags
|= R_NOINTR
;
3456 if (IS_VALID_CRED(cred
)) {
3457 kauth_cred_ref(cred
);
3460 req
->r_procnum
= procnum
;
3461 if (proct
[procnum
] > 0)
3462 req
->r_flags
|= R_TIMING
;
3463 req
->r_nmrep
.nmc_mhead
= NULL
;
3464 SLIST_INIT(&req
->r_gss_seqlist
);
3465 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3466 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3467 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3469 /* set auth flavor to use for request */
3471 req
->r_auth
= RPCAUTH_NONE
;
3472 else if (req
->r_np
&& (req
->r_np
->n_auth
!= RPCAUTH_INVALID
))
3473 req
->r_auth
= req
->r_np
->n_auth
;
3475 req
->r_auth
= nmp
->nm_auth
;
3477 lck_mtx_unlock(&nmp
->nm_lock
);
3479 /* move the request mbuf chain to the nfsreq */
3480 req
->r_mrest
= nmrest
->nmc_mhead
;
3481 nmrest
->nmc_mhead
= NULL
;
3483 req
->r_flags
|= R_INITTED
;
3491 * Clean up and free an NFS request structure.
3494 nfs_request_destroy(struct nfsreq
*req
)
3496 struct nfsmount
*nmp
= req
->r_np
? NFSTONMP(req
->r_np
) : req
->r_nmp
;
3497 struct gss_seq
*gsp
, *ngsp
;
3498 struct timespec ts
= { 1, 0 };
3499 int clearjbtimeo
= 0;
3501 if (!req
|| !(req
->r_flags
& R_INITTED
))
3503 req
->r_flags
&= ~R_INITTED
;
3504 if (req
->r_lflags
& RL_QUEUED
)
3505 nfs_reqdequeue(req
);
3506 if (req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
3507 /* still on an async I/O queue? */
3508 lck_mtx_lock(nfsiod_mutex
);
3509 if (nmp
&& (req
->r_achain
.tqe_next
!= NFSREQNOLIST
)) {
3510 TAILQ_REMOVE(&nmp
->nm_iodq
, req
, r_achain
);
3511 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
3513 lck_mtx_unlock(nfsiod_mutex
);
3515 lck_mtx_lock(&req
->r_mtx
);
3517 lck_mtx_lock(&nmp
->nm_lock
);
3518 if (req
->r_flags
& R_CWND
) {
3519 /* Decrement the outstanding request count. */
3520 req
->r_flags
&= ~R_CWND
;
3521 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3522 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3523 /* congestion window is open, poke the cwnd queue */
3524 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3525 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3526 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3530 if (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
) {
3531 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
3532 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
3533 if (req
->r_flags
& R_RESENDQ
)
3534 req
->r_flags
&= ~R_RESENDQ
;
3536 if (req
->r_cchain
.tqe_next
!= NFSREQNOLIST
) {
3537 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3538 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3540 if (req
->r_flags
& R_JBTPRINTFMSG
) {
3541 req
->r_flags
&= ~R_JBTPRINTFMSG
;
3543 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
3545 lck_mtx_unlock(&nmp
->nm_lock
);
3547 while (req
->r_flags
& R_RESENDQ
)
3548 msleep(req
, &req
->r_mtx
, (PZERO
- 1), "nfsresendqwait", &ts
);
3549 lck_mtx_unlock(&req
->r_mtx
);
3551 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
3553 mbuf_freem(req
->r_mhead
);
3554 else if (req
->r_mrest
)
3555 mbuf_freem(req
->r_mrest
);
3556 if (req
->r_nmrep
.nmc_mhead
)
3557 mbuf_freem(req
->r_nmrep
.nmc_mhead
);
3558 if (IS_VALID_CRED(req
->r_cred
))
3559 kauth_cred_unref(&req
->r_cred
);
3560 if (nfs_request_using_gss(req
))
3561 nfs_gss_clnt_rpcdone(req
);
3562 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
)
3565 nfs_gss_clnt_ctx_unref(req
);
3566 if (req
->r_wrongsec
)
3567 FREE(req
->r_wrongsec
, M_TEMP
);
3569 lck_mtx_destroy(&req
->r_mtx
, nfs_request_grp
);
3570 if (req
->r_flags
& R_ALLOCATED
)
3571 FREE_ZONE(req
, sizeof(*req
), M_NFSREQ
);
3575 nfs_request_ref(struct nfsreq
*req
, int locked
)
3578 lck_mtx_lock(&req
->r_mtx
);
3579 if (req
->r_refs
<= 0)
3580 panic("nfsreq reference error");
3583 lck_mtx_unlock(&req
->r_mtx
);
3587 nfs_request_rele(struct nfsreq
*req
)
3591 lck_mtx_lock(&req
->r_mtx
);
3592 if (req
->r_refs
<= 0)
3593 panic("nfsreq reference underflow");
3595 destroy
= (req
->r_refs
== 0);
3596 lck_mtx_unlock(&req
->r_mtx
);
3598 nfs_request_destroy(req
);
3603 * Add an (updated) RPC header with authorization to an NFS request.
3606 nfs_request_add_header(struct nfsreq
*req
)
3608 struct nfsmount
*nmp
;
3612 /* free up any previous header */
3613 if ((m
= req
->r_mhead
)) {
3614 while (m
&& (m
!= req
->r_mrest
))
3616 req
->r_mhead
= NULL
;
3619 nmp
= req
->r_np
? NFSTONMP(req
->r_np
) : req
->r_nmp
;
3623 error
= nfsm_rpchead(req
, req
->r_mrest
, &req
->r_xid
, &req
->r_mhead
);
3627 req
->r_mreqlen
= mbuf_pkthdr_len(req
->r_mhead
);
3628 nmp
= req
->r_np
? NFSTONMP(req
->r_np
) : req
->r_nmp
;
3631 lck_mtx_lock(&nmp
->nm_lock
);
3632 if (NMFLAG(nmp
, SOFT
))
3633 req
->r_retry
= nmp
->nm_retry
;
3635 req
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
3636 lck_mtx_unlock(&nmp
->nm_lock
);
3643 * Queue an NFS request up and send it out.
3646 nfs_request_send(struct nfsreq
*req
, int wait
)
3648 struct nfsmount
*nmp
;
3651 lck_mtx_lock(&req
->r_mtx
);
3652 req
->r_flags
|= R_SENDING
;
3653 lck_mtx_unlock(&req
->r_mtx
);
3655 lck_mtx_lock(nfs_request_mutex
);
3657 nmp
= req
->r_np
? NFSTONMP(req
->r_np
) : req
->r_nmp
;
3659 lck_mtx_unlock(nfs_request_mutex
);
3664 if (!req
->r_start
) {
3665 req
->r_start
= now
.tv_sec
;
3666 req
->r_lastmsg
= now
.tv_sec
-
3667 ((nmp
->nm_tprintf_delay
) - (nmp
->nm_tprintf_initial_delay
));
3670 OSAddAtomic(1, &nfsstats
.rpcrequests
);
3673 * Chain request into list of outstanding requests. Be sure
3674 * to put it LAST so timer finds oldest requests first.
3675 * Make sure that the request queue timer is running
3676 * to check for possible request timeout.
3678 TAILQ_INSERT_TAIL(&nfs_reqq
, req
, r_chain
);
3679 req
->r_lflags
|= RL_QUEUED
;
3680 if (!nfs_request_timer_on
) {
3681 nfs_request_timer_on
= 1;
3682 nfs_interval_timer_start(nfs_request_timer_call
,
3685 lck_mtx_unlock(nfs_request_mutex
);
3687 /* Send the request... */
3688 return (nfs_send(req
, wait
));
3692 * Call nfs_wait_reply() to wait for the reply.
3695 nfs_request_wait(struct nfsreq
*req
)
3697 req
->r_error
= nfs_wait_reply(req
);
3701 * Finish up an NFS request by dequeueing it and
3702 * doing the initial NFS request reply processing.
3707 struct nfsm_chain
*nmrepp
,
3710 struct nfsmount
*nmp
;
3713 uint32_t verf_len
= 0;
3714 uint32_t reply_status
= 0;
3715 uint32_t rejected_status
= 0;
3716 uint32_t auth_status
= 0;
3717 uint32_t accepted_status
= 0;
3718 struct nfsm_chain nmrep
;
3719 int error
, clearjbtimeo
;
3721 error
= req
->r_error
;
3724 nmrepp
->nmc_mhead
= NULL
;
3726 /* RPC done, unlink the request. */
3727 nfs_reqdequeue(req
);
3729 mrep
= req
->r_nmrep
.nmc_mhead
;
3731 nmp
= req
->r_np
? NFSTONMP(req
->r_np
) : req
->r_nmp
;
3733 if ((req
->r_flags
& R_CWND
) && nmp
) {
3735 * Decrement the outstanding request count.
3737 req
->r_flags
&= ~R_CWND
;
3738 lck_mtx_lock(&nmp
->nm_lock
);
3739 FSDBG(273, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3740 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3741 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3742 /* congestion window is open, poke the cwnd queue */
3743 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3744 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3745 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3748 lck_mtx_unlock(&nmp
->nm_lock
);
3751 if (nfs_request_using_gss(req
)) {
3753 * If the request used an RPCSEC_GSS credential
3754 * then reset its sequence number bit in the
3757 nfs_gss_clnt_rpcdone(req
);
3760 * If we need to re-send, go back and re-build the
3761 * request based on a new sequence number.
3762 * Note that we're using the original XID.
3764 if (error
== EAGAIN
) {
3768 error
= nfs_gss_clnt_args_restore(req
); // remove any trailer mbufs
3769 req
->r_nmrep
.nmc_mhead
= NULL
;
3770 req
->r_flags
|= R_RESTART
;
3771 if (error
== ENEEDAUTH
) {
3772 req
->r_xid
= 0; // get a new XID
3780 * If there was a successful reply, make sure to mark the mount as up.
3781 * If a tprintf message was given (or if this is a timed-out soft mount)
3782 * then post a tprintf message indicating the server is alive again.
3785 if ((req
->r_flags
& R_TPRINTFMSG
) ||
3786 (nmp
&& NMFLAG(nmp
, SOFT
) &&
3787 ((nmp
->nm_state
& (NFSSTA_TIMEO
|NFSSTA_FORCE
)) == NFSSTA_TIMEO
)))
3788 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, "is alive again");
3790 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, NULL
);
3797 * break down the RPC header and check if ok
3799 nmrep
= req
->r_nmrep
;
3800 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
3802 if (reply_status
== RPC_MSGDENIED
) {
3803 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
3805 if (rejected_status
== RPC_MISMATCH
) {
3809 nfsm_chain_get_32(error
, &nmrep
, auth_status
);
3811 switch (auth_status
) {
3812 case RPCSEC_GSS_CREDPROBLEM
:
3813 case RPCSEC_GSS_CTXPROBLEM
:
3815 * An RPCSEC_GSS cred or context problem.
3816 * We can't use it anymore.
3817 * Restore the args, renew the context
3818 * and set up for a resend.
3820 error
= nfs_gss_clnt_args_restore(req
);
3821 if (error
&& error
!= ENEEDAUTH
)
3825 error
= nfs_gss_clnt_ctx_renew(req
);
3830 req
->r_nmrep
.nmc_mhead
= NULL
;
3831 req
->r_xid
= 0; // get a new XID
3832 req
->r_flags
|= R_RESTART
;
3841 /* Now check the verifier */
3842 nfsm_chain_get_32(error
, &nmrep
, verf_type
); // verifier flavor
3843 nfsm_chain_get_32(error
, &nmrep
, verf_len
); // verifier length
3846 switch (req
->r_auth
) {
3849 /* Any AUTH_SYS verifier is ignored */
3851 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
3852 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
3857 error
= nfs_gss_clnt_verf_get(req
, &nmrep
,
3858 verf_type
, verf_len
, &accepted_status
);
3863 switch (accepted_status
) {
3865 if (req
->r_procnum
== NFSPROC_NULL
) {
3867 * The NFS null procedure is unique,
3868 * in not returning an NFS status.
3872 nfsm_chain_get_32(error
, &nmrep
, *status
);
3876 if ((nmp
->nm_vers
!= NFS_VER2
) && (*status
== NFSERR_TRYLATER
)) {
3878 * It's a JUKEBOX error - delay and try again
3880 int delay
, slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
3883 req
->r_nmrep
.nmc_mhead
= NULL
;
3884 if ((req
->r_delay
>= 30) && !(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
3885 /* we're not yet completely mounted and */
3886 /* we can't complete an RPC, so we fail */
3887 OSAddAtomic(1, &nfsstats
.rpctimeouts
);
3889 error
= req
->r_error
;
3892 req
->r_delay
= !req
->r_delay
? NFS_TRYLATERDEL
: (req
->r_delay
* 2);
3893 if (req
->r_delay
> 30)
3895 if (nmp
->nm_tprintf_initial_delay
&& (req
->r_delay
>= nmp
->nm_tprintf_initial_delay
)) {
3896 if (!(req
->r_flags
& R_JBTPRINTFMSG
)) {
3897 req
->r_flags
|= R_JBTPRINTFMSG
;
3898 lck_mtx_lock(&nmp
->nm_lock
);
3900 lck_mtx_unlock(&nmp
->nm_lock
);
3902 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_JUKEBOXTIMEO
,
3903 "resource temporarily unavailable (jukebox)");
3905 if (NMFLAG(nmp
, SOFT
) && (req
->r_delay
== 30) && !(req
->r_flags
& R_NOINTR
)) {
3906 /* for soft mounts, just give up after a short while */
3907 OSAddAtomic(1, &nfsstats
.rpctimeouts
);
3909 error
= req
->r_error
;
3912 delay
= req
->r_delay
;
3913 if (req
->r_callback
.rcb_func
) {
3916 req
->r_resendtime
= now
.tv_sec
+ delay
;
3919 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
3921 tsleep(&lbolt
, PSOCK
|slpflag
, "nfs_jukebox_trylater", 0);
3923 } while (--delay
> 0);
3925 req
->r_xid
= 0; // get a new XID
3926 req
->r_flags
|= R_RESTART
;
3928 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_TRYLATER
);
3932 if (req
->r_flags
& R_JBTPRINTFMSG
) {
3933 req
->r_flags
&= ~R_JBTPRINTFMSG
;
3934 lck_mtx_lock(&nmp
->nm_lock
);
3936 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
3937 lck_mtx_unlock(&nmp
->nm_lock
);
3938 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, "resource available again");
3941 if ((nmp
->nm_vers
>= NFS_VER4
) && (*status
== NFSERR_WRONGSEC
)) {
3943 * Hmmm... we need to try a different security flavor.
3944 * The first time a request hits this, we will allocate an array
3945 * to track flavors to try. We fill the array with the mount's
3946 * preferred flavors or the server's preferred flavors or just the
3947 * flavors we support.
3949 uint32_t srvflavors
[NX_MAX_SEC_FLAVORS
];
3952 /* Call SECINFO to try to get list of flavors from server. */
3953 srvcount
= NX_MAX_SEC_FLAVORS
;
3954 nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, req
->r_cred
, srvflavors
, &srvcount
);
3956 if (!req
->r_wrongsec
) {
3957 /* first time... set up flavor array */
3958 MALLOC(req
->r_wrongsec
, uint32_t*, NX_MAX_SEC_FLAVORS
*sizeof(uint32_t), M_TEMP
, M_WAITOK
);
3959 if (!req
->r_wrongsec
) {
3964 if (nmp
->nm_sec
.count
) { /* use the mount's preferred list of flavors */
3965 for(; i
< nmp
->nm_sec
.count
; i
++)
3966 req
->r_wrongsec
[i
] = nmp
->nm_sec
.flavors
[i
];
3967 } else if (srvcount
) { /* otherwise use the server's list of flavors */
3968 for(; i
< srvcount
; i
++)
3969 req
->r_wrongsec
[i
] = srvflavors
[i
];
3970 } else { /* otherwise, just try the flavors we support. */
3971 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5P
;
3972 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5I
;
3973 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5
;
3974 req
->r_wrongsec
[i
++] = RPCAUTH_SYS
;
3975 req
->r_wrongsec
[i
++] = RPCAUTH_NONE
;
3977 for(; i
< NX_MAX_SEC_FLAVORS
; i
++) /* invalidate any remaining slots */
3978 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
3981 /* clear the current flavor from the list */
3982 for(i
=0; i
< NX_MAX_SEC_FLAVORS
; i
++)
3983 if (req
->r_wrongsec
[i
] == req
->r_auth
)
3984 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
3986 /* find the next flavor to try */
3987 for(i
=0; i
< NX_MAX_SEC_FLAVORS
; i
++)
3988 if (req
->r_wrongsec
[i
] != RPCAUTH_INVALID
) {
3989 if (((req
->r_wrongsec
[i
] == RPCAUTH_KRB5P
) ||
3990 (req
->r_wrongsec
[i
] == RPCAUTH_KRB5I
) ||
3991 (req
->r_wrongsec
[i
] == RPCAUTH_KRB5
)) && (req
->r_gss_ctx
&&
3992 (req
->r_gss_ctx
->gss_clnt_service
== RPCSEC_GSS_SVC_SYS
))) {
3993 /* don't bother trying Kerberos if we've already got a fallback context */
3994 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
3997 if (!srvcount
) /* no server list, just try it */
3999 /* check that it's in the server's list */
4000 for(j
=0; j
< srvcount
; j
++)
4001 if (req
->r_wrongsec
[i
] == srvflavors
[j
])
4003 if (j
< srvcount
) /* found */
4005 /* not found in server list */
4006 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4008 if (i
== NX_MAX_SEC_FLAVORS
) {
4009 /* nothing left to try! */
4014 /* retry with the next auth flavor */
4015 req
->r_auth
= req
->r_wrongsec
[i
];
4016 req
->r_xid
= 0; // get a new XID
4017 req
->r_flags
|= R_RESTART
;
4019 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_WRONGSEC
);
4022 if ((nmp
->nm_vers
>= NFS_VER4
) && req
->r_wrongsec
) {
4024 * We renegotiated security for this request; so update the
4025 * default security flavor for the associated node.
4028 req
->r_np
->n_auth
= req
->r_auth
;
4031 if (*status
== NFS_OK
) {
4033 * Successful NFS request
4036 req
->r_nmrep
.nmc_mhead
= NULL
;
4039 /* Got an NFS error of some kind */
4042 * If the File Handle was stale, invalidate the
4043 * lookup cache, just in case.
4045 if ((*status
== ESTALE
) && req
->r_np
) {
4046 cache_purge(NFSTOV(req
->r_np
));
4047 /* if monitored, also send delete event */
4048 if (vnode_ismonitored(NFSTOV(req
->r_np
)))
4049 nfs_vnode_notify(req
->r_np
, (VNODE_EVENT_ATTRIB
|VNODE_EVENT_DELETE
));
4051 if (nmp
->nm_vers
== NFS_VER2
)
4055 req
->r_nmrep
.nmc_mhead
= NULL
;
4058 case RPC_PROGUNAVAIL
:
4059 error
= EPROGUNAVAIL
;
4061 case RPC_PROGMISMATCH
:
4062 error
= ERPCMISMATCH
;
4064 case RPC_PROCUNAVAIL
:
4065 error
= EPROCUNAVAIL
;
4070 case RPC_SYSTEM_ERR
:
4076 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4077 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4078 lck_mtx_lock(&nmp
->nm_lock
);
4080 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4081 lck_mtx_unlock(&nmp
->nm_lock
);
4083 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4085 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
,
4086 (!error
&& (*status
== NFS_OK
)) ? 0xf0f0f0f0 : error
);
4091 * NFS request using a GSS/Kerberos security flavor?
4094 nfs_request_using_gss(struct nfsreq
*req
)
4096 if (!req
->r_gss_ctx
)
4098 switch (req
->r_auth
) {
4108 * Perform an NFS request synchronously.
4114 mount_t mp
, /* used only if !np */
4115 struct nfsm_chain
*nmrest
,
4118 struct nfsreq_secinfo_args
*si
,
4119 struct nfsm_chain
*nmrepp
,
4123 return nfs_request2(np
, mp
, nmrest
, procnum
,
4124 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4125 si
, 0, nmrepp
, xidp
, status
);
4131 mount_t mp
, /* used only if !np */
4132 struct nfsm_chain
*nmrest
,
4136 struct nfsreq_secinfo_args
*si
,
4138 struct nfsm_chain
*nmrepp
,
4142 struct nfsreq rq
, *req
= &rq
;
4145 if ((error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, &req
)))
4147 req
->r_flags
|= (flags
& R_OPTMASK
);
4149 req
->r_secinfo
= *si
;
4151 FSDBG_TOP(273, R_XID32(req
->r_xid
), np
, procnum
, 0);
4154 req
->r_flags
&= ~R_RESTART
;
4155 if ((error
= nfs_request_add_header(req
)))
4159 if ((error
= nfs_request_send(req
, 1)))
4161 nfs_request_wait(req
);
4162 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4164 } while (req
->r_flags
& R_RESTART
);
4166 FSDBG_BOT(273, R_XID32(req
->r_xid
), np
, procnum
, error
);
4167 nfs_request_rele(req
);
4173 * Set up a new null proc request to exchange GSS context tokens with the
4174 * server. Associate the context that we are setting up with the request that we
4181 struct nfsm_chain
*nmrest
,
4185 struct nfs_gss_clnt_ctx
*cp
, /* Set to gss context to renew or setup */
4186 struct nfsm_chain
*nmrepp
,
4189 struct nfsreq rq
, *req
= &rq
;
4192 if ((error
= nfs_request_create(NULL
, mp
, nmrest
, NFSPROC_NULL
, thd
, cred
, &req
)))
4194 req
->r_flags
|= (flags
& R_OPTMASK
);
4197 printf("nfs_request_gss request has no context\n");
4198 nfs_request_rele(req
);
4199 return (NFSERR_EAUTH
);
4201 nfs_gss_clnt_ctx_ref(req
, cp
);
4203 FSDBG_TOP(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, 0);
4206 req
->r_flags
&= ~R_RESTART
;
4207 if ((error
= nfs_request_add_header(req
)))
4210 if ((error
= nfs_request_send(req
, 1)))
4212 nfs_request_wait(req
);
4213 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4215 } while (req
->r_flags
& R_RESTART
);
4217 FSDBG_BOT(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, error
);
4218 nfs_request_rele(req
);
4223 * Create and start an asynchronous NFS request.
4228 mount_t mp
, /* used only if !np */
4229 struct nfsm_chain
*nmrest
,
4233 struct nfsreq_secinfo_args
*si
,
4235 struct nfsreq_cbinfo
*cb
,
4236 struct nfsreq
**reqp
)
4239 struct nfsmount
*nmp
;
4242 error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, reqp
);
4244 FSDBG(274, (req
? R_XID32(req
->r_xid
) : 0), np
, procnum
, error
);
4247 req
->r_flags
|= (flags
& R_OPTMASK
);
4248 req
->r_flags
|= R_ASYNC
;
4250 req
->r_secinfo
= *si
;
4252 req
->r_callback
= *cb
;
4253 error
= nfs_request_add_header(req
);
4255 req
->r_flags
|= R_WAITSENT
;
4256 if (req
->r_callback
.rcb_func
)
4257 nfs_request_ref(req
, 0);
4258 error
= nfs_request_send(req
, 1);
4259 lck_mtx_lock(&req
->r_mtx
);
4260 if (!error
&& !(req
->r_flags
& R_SENT
) && req
->r_callback
.rcb_func
) {
4261 /* make sure to wait until this async I/O request gets sent */
4262 int slpflag
= (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4263 struct timespec ts
= { 2, 0 };
4264 while (!(req
->r_flags
& R_SENT
)) {
4265 if ((req
->r_flags
& R_RESENDQ
) && ((nmp
= req
->r_nmp
))) {
4266 lck_mtx_lock(&nmp
->nm_lock
);
4267 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4269 * It's not going to get off the resend queue if we're in recovery.
4270 * So, just take it off ourselves. We could be holding mount state
4271 * busy and thus holding up the start of recovery.
4273 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4274 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4275 if (req
->r_flags
& R_RESENDQ
)
4276 req
->r_flags
&= ~R_RESENDQ
;
4277 lck_mtx_unlock(&nmp
->nm_lock
);
4278 req
->r_flags
|= R_SENDING
;
4279 lck_mtx_unlock(&req
->r_mtx
);
4280 error
= nfs_send(req
, 1);
4281 lck_mtx_lock(&req
->r_mtx
);
4286 lck_mtx_unlock(&nmp
->nm_lock
);
4288 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
4290 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitsent", &ts
);
4294 sent
= req
->r_flags
& R_SENT
;
4295 lck_mtx_unlock(&req
->r_mtx
);
4296 if (error
&& req
->r_callback
.rcb_func
&& !sent
)
4297 nfs_request_rele(req
);
4299 FSDBG(274, R_XID32(req
->r_xid
), np
, procnum
, error
);
4300 if (error
|| req
->r_callback
.rcb_func
)
4301 nfs_request_rele(req
);
4306 * Wait for and finish an asynchronous NFS request.
4309 nfs_request_async_finish(
4311 struct nfsm_chain
*nmrepp
,
4315 int error
= 0, asyncio
= req
->r_callback
.rcb_func
? 1 : 0;
4316 struct nfsmount
*nmp
;
4318 lck_mtx_lock(&req
->r_mtx
);
4320 req
->r_flags
|= R_ASYNCWAIT
;
4321 while (req
->r_flags
& R_RESENDQ
) { /* wait until the request is off the resend queue */
4322 struct timespec ts
= { 2, 0 };
4323 if ((nmp
= req
->r_nmp
)) {
4324 lck_mtx_lock(&nmp
->nm_lock
);
4325 if ((nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
4327 * It's not going to get off the resend queue if we're in recovery.
4328 * So, just take it off ourselves. We could be holding mount state
4329 * busy and thus holding up the start of recovery.
4331 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4332 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4333 if (req
->r_flags
& R_RESENDQ
)
4334 req
->r_flags
&= ~R_RESENDQ
;
4335 lck_mtx_unlock(&nmp
->nm_lock
);
4338 lck_mtx_unlock(&nmp
->nm_lock
);
4340 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0)))
4342 msleep(req
, &req
->r_mtx
, PZERO
-1, "nfsresendqwait", &ts
);
4344 lck_mtx_unlock(&req
->r_mtx
);
4347 nfs_request_wait(req
);
4348 error
= nfs_request_finish(req
, nmrepp
, status
);
4351 while (!error
&& (req
->r_flags
& R_RESTART
)) {
4352 if (asyncio
&& req
->r_resendtime
) { /* send later */
4353 lck_mtx_lock(&req
->r_mtx
);
4354 nfs_asyncio_resend(req
);
4355 lck_mtx_unlock(&req
->r_mtx
);
4356 return (EINPROGRESS
);
4359 req
->r_flags
&= ~R_RESTART
;
4360 if ((error
= nfs_request_add_header(req
)))
4362 if ((error
= nfs_request_send(req
, !asyncio
)))
4365 return (EINPROGRESS
);
4366 nfs_request_wait(req
);
4367 if ((error
= nfs_request_finish(req
, nmrepp
, status
)))
4373 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, error
);
4374 nfs_request_rele(req
);
4379 * Cancel a pending asynchronous NFS request.
4382 nfs_request_async_cancel(struct nfsreq
*req
)
4384 nfs_reqdequeue(req
);
4385 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, 0xD1ED1E);
4386 nfs_request_rele(req
);
4390 * Flag a request as being terminated.
4393 nfs_softterm(struct nfsreq
*req
)
4395 struct nfsmount
*nmp
= req
->r_nmp
;
4396 req
->r_flags
|= R_SOFTTERM
;
4397 req
->r_error
= ETIMEDOUT
;
4398 if (!(req
->r_flags
& R_CWND
) || !nmp
)
4400 /* update congestion window */
4401 req
->r_flags
&= ~R_CWND
;
4402 lck_mtx_lock(&nmp
->nm_lock
);
4403 FSDBG(532, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4404 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4405 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4406 /* congestion window is open, poke the cwnd queue */
4407 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4408 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4409 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4412 lck_mtx_unlock(&nmp
->nm_lock
);
4416 * Ensure req isn't in use by the timer, then dequeue it.
4419 nfs_reqdequeue(struct nfsreq
*req
)
4421 lck_mtx_lock(nfs_request_mutex
);
4422 while (req
->r_lflags
& RL_BUSY
) {
4423 req
->r_lflags
|= RL_WAITING
;
4424 msleep(&req
->r_lflags
, nfs_request_mutex
, PSOCK
, "reqdeq", NULL
);
4426 if (req
->r_lflags
& RL_QUEUED
) {
4427 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
4428 req
->r_lflags
&= ~RL_QUEUED
;
4430 lck_mtx_unlock(nfs_request_mutex
);
4434 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
4435 * free()'d out from under it.
4438 nfs_reqbusy(struct nfsreq
*req
)
4440 if (req
->r_lflags
& RL_BUSY
)
4441 panic("req locked");
4442 req
->r_lflags
|= RL_BUSY
;
4446 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
4449 nfs_reqnext(struct nfsreq
*req
)
4451 struct nfsreq
* nextreq
;
4456 * We need to get and busy the next req before signalling the
4457 * current one, otherwise wakeup() may block us and we'll race to
4458 * grab the next req.
4460 nextreq
= TAILQ_NEXT(req
, r_chain
);
4461 if (nextreq
!= NULL
)
4462 nfs_reqbusy(nextreq
);
4463 /* unbusy and signal. */
4464 req
->r_lflags
&= ~RL_BUSY
;
4465 if (req
->r_lflags
& RL_WAITING
) {
4466 req
->r_lflags
&= ~RL_WAITING
;
4467 wakeup(&req
->r_lflags
);
4473 * NFS request queue timer routine
4475 * Scan the NFS request queue for any requests that have timed out.
4477 * Alert the system of unresponsive servers.
4478 * Mark expired requests on soft mounts as terminated.
4479 * For UDP, mark/signal requests for retransmission.
4482 nfs_request_timer(__unused
void *param0
, __unused
void *param1
)
4485 struct nfsmount
*nmp
;
4486 int timeo
, maxtime
, finish_asyncio
, error
;
4488 TAILQ_HEAD(nfs_mount_pokeq
, nfsmount
) nfs_mount_poke_queue
;
4490 lck_mtx_lock(nfs_request_mutex
);
4491 req
= TAILQ_FIRST(&nfs_reqq
);
4492 if (req
== NULL
) { /* no requests - turn timer off */
4493 nfs_request_timer_on
= 0;
4494 lck_mtx_unlock(nfs_request_mutex
);
4499 TAILQ_INIT(&nfs_mount_poke_queue
);
4502 for ( ; req
!= NULL
; req
= nfs_reqnext(req
)) {
4504 if (!nmp
) /* unmounted */
4506 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
)
4508 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0))) {
4509 if (req
->r_callback
.rcb_func
!= NULL
) {
4510 /* async I/O RPC needs to be finished */
4511 lck_mtx_lock(&req
->r_mtx
);
4512 req
->r_error
= error
;
4513 finish_asyncio
= !(req
->r_flags
& R_WAITSENT
);
4515 lck_mtx_unlock(&req
->r_mtx
);
4517 nfs_asyncio_finish(req
);
4522 lck_mtx_lock(&req
->r_mtx
);
4524 if (nmp
->nm_tprintf_initial_delay
&&
4525 ((req
->r_rexmit
> 2) || (req
->r_flags
& R_RESENDERR
)) &&
4526 ((req
->r_lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
)) {
4527 req
->r_lastmsg
= now
.tv_sec
;
4528 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
4530 req
->r_flags
|= R_TPRINTFMSG
;
4531 lck_mtx_lock(&nmp
->nm_lock
);
4532 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4533 lck_mtx_unlock(&nmp
->nm_lock
);
4534 /* we're not yet completely mounted and */
4535 /* we can't complete an RPC, so we fail */
4536 OSAddAtomic(1, &nfsstats
.rpctimeouts
);
4538 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
4540 lck_mtx_unlock(&req
->r_mtx
);
4542 nfs_asyncio_finish(req
);
4545 lck_mtx_unlock(&nmp
->nm_lock
);
4549 * Put a reasonable limit on the maximum timeout,
4550 * and reduce that limit when soft mounts get timeouts or are in reconnect.
4552 if (!NMFLAG(nmp
, SOFT
))
4553 maxtime
= NFS_MAXTIMEO
;
4554 else if ((req
->r_flags
& (R_SETUP
|R_RECOVER
)) ||
4555 ((nmp
->nm_reconnect_start
<= 0) || ((now
.tv_sec
- nmp
->nm_reconnect_start
) < 8)))
4556 maxtime
= (NFS_MAXTIMEO
/ (nmp
->nm_timeouts
+1))/2;
4558 maxtime
= NFS_MINTIMEO
/4;
4561 * Check for request timeout.
4563 if (req
->r_rtt
>= 0) {
4565 lck_mtx_lock(&nmp
->nm_lock
);
4566 if (req
->r_flags
& R_RESENDERR
) {
4567 /* with resend errors, retry every few seconds */
4570 if (req
->r_procnum
== NFSPROC_NULL
&& req
->r_gss_ctx
!= NULL
)
4571 timeo
= NFS_MINIDEMTIMEO
; // gss context setup
4572 else if (NMFLAG(nmp
, DUMBTIMER
))
4573 timeo
= nmp
->nm_timeo
;
4575 timeo
= NFS_RTO(nmp
, proct
[req
->r_procnum
]);
4577 /* ensure 62.5 ms floor */
4578 while (16 * timeo
< hz
)
4580 if (nmp
->nm_timeouts
> 0)
4581 timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
4583 /* limit timeout to max */
4584 if (timeo
> maxtime
)
4586 if (req
->r_rtt
<= timeo
) {
4587 lck_mtx_unlock(&nmp
->nm_lock
);
4588 lck_mtx_unlock(&req
->r_mtx
);
4591 /* The request has timed out */
4592 NFS_SOCK_DBG(("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
4593 req
->r_procnum
, proct
[req
->r_procnum
],
4594 req
->r_xid
, req
->r_rtt
, timeo
, nmp
->nm_timeouts
,
4595 (now
.tv_sec
- req
->r_start
)*NFS_HZ
, maxtime
));
4596 if (nmp
->nm_timeouts
< 8)
4598 nfs_mount_check_dead_timeout(nmp
);
4599 /* if it's been a few seconds, try poking the socket */
4600 if ((nmp
->nm_sotype
== SOCK_STREAM
) &&
4601 ((now
.tv_sec
- req
->r_start
) >= 3) &&
4602 !(nmp
->nm_sockflags
& (NMSOCK_POKE
|NMSOCK_UNMOUNT
)) &&
4603 (nmp
->nm_sockflags
& NMSOCK_READY
)) {
4604 nmp
->nm_sockflags
|= NMSOCK_POKE
;
4605 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
4607 lck_mtx_unlock(&nmp
->nm_lock
);
4610 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
4611 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& (R_SETUP
|R_RECOVER
))) &&
4612 ((req
->r_rexmit
>= req
->r_retry
) || /* too many */
4613 ((now
.tv_sec
- req
->r_start
)*NFS_HZ
> maxtime
))) { /* too long */
4614 OSAddAtomic(1, &nfsstats
.rpctimeouts
);
4615 lck_mtx_lock(&nmp
->nm_lock
);
4616 if (!(nmp
->nm_state
& NFSSTA_TIMEO
)) {
4617 lck_mtx_unlock(&nmp
->nm_lock
);
4618 /* make sure we note the unresponsive server */
4619 /* (maxtime may be less than tprintf delay) */
4620 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
4622 req
->r_lastmsg
= now
.tv_sec
;
4623 req
->r_flags
|= R_TPRINTFMSG
;
4625 lck_mtx_unlock(&nmp
->nm_lock
);
4627 if (req
->r_flags
& R_NOINTR
) {
4628 /* don't terminate nointr requests on timeout */
4629 lck_mtx_unlock(&req
->r_mtx
);
4632 NFS_SOCK_DBG(("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
4633 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
,
4634 now
.tv_sec
- req
->r_start
));
4636 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
4638 lck_mtx_unlock(&req
->r_mtx
);
4640 nfs_asyncio_finish(req
);
4644 /* for TCP, only resend if explicitly requested */
4645 if ((nmp
->nm_sotype
== SOCK_STREAM
) && !(req
->r_flags
& R_MUSTRESEND
)) {
4646 if (++req
->r_rexmit
> NFS_MAXREXMIT
)
4647 req
->r_rexmit
= NFS_MAXREXMIT
;
4649 lck_mtx_unlock(&req
->r_mtx
);
4654 * The request needs to be (re)sent. Kick the requester to resend it.
4655 * (unless it's already marked as needing a resend)
4657 if ((req
->r_flags
& R_MUSTRESEND
) && (req
->r_rtt
== -1)) {
4658 lck_mtx_unlock(&req
->r_mtx
);
4661 NFS_SOCK_DBG(("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
4662 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
));
4663 req
->r_flags
|= R_MUSTRESEND
;
4666 if ((req
->r_flags
& (R_ASYNC
|R_ASYNCWAIT
|R_SENDING
)) == R_ASYNC
)
4667 nfs_asyncio_resend(req
);
4668 lck_mtx_unlock(&req
->r_mtx
);
4671 lck_mtx_unlock(nfs_request_mutex
);
4673 /* poke any sockets */
4674 while ((nmp
= TAILQ_FIRST(&nfs_mount_poke_queue
))) {
4675 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
4677 lck_mtx_lock(&nmp
->nm_lock
);
4678 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
4679 wakeup(&nmp
->nm_sockflags
);
4680 lck_mtx_unlock(&nmp
->nm_lock
);
4683 nfs_interval_timer_start(nfs_request_timer_call
, NFS_REQUESTDELAY
);
4687 * check a thread's proc for the "noremotehang" flag.
4690 nfs_noremotehang(thread_t thd
)
4692 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
4693 return (p
&& proc_noremotehang(p
));
4697 * Test for a termination condition pending on the process.
4698 * This is used to determine if we need to bail on a mount.
4699 * ETIMEDOUT is returned if there has been a soft timeout.
4700 * EINTR is returned if there is a signal pending that is not being ignored
4701 * and the mount is interruptable, or if we are a thread that is in the process
4702 * of cancellation (also SIGKILL posted).
4704 extern int sigprop
[NSIG
+1];
4706 nfs_sigintr(struct nfsmount
*nmp
, struct nfsreq
*req
, thread_t thd
, int nmplocked
)
4714 if (req
&& (req
->r_flags
& R_SOFTTERM
))
4715 return (ETIMEDOUT
); /* request has been terminated. */
4716 if (req
&& (req
->r_flags
& R_NOINTR
))
4717 thd
= NULL
; /* don't check for signal on R_NOINTR */
4720 lck_mtx_lock(&nmp
->nm_lock
);
4721 if (nmp
->nm_state
& NFSSTA_FORCE
) {
4722 /* If a force unmount is in progress then fail. */
4724 } else if (nmp
->nm_mountp
->mnt_kern_flag
& MNTK_FRCUNMOUNT
) {
4725 /* Someone is unmounting us, go soft and mark it. */
4726 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_SOFT
);
4727 nmp
->nm_state
|= NFSSTA_FORCE
;
4730 /* Check if the mount is marked dead. */
4731 if (!error
&& (nmp
->nm_state
& NFSSTA_DEAD
))
4735 * If the mount is hung and we've requested not to hang
4736 * on remote filesystems, then bail now.
4738 if (!error
&& (nmp
->nm_state
& NFSSTA_TIMEO
) && nfs_noremotehang(thd
))
4742 lck_mtx_unlock(&nmp
->nm_lock
);
4746 /* may not have a thread for async I/O */
4751 * Check if the process is aborted, but don't interrupt if we
4752 * were killed by a signal and this is the exiting thread which
4753 * is attempting to dump core.
4755 if (((p
= current_proc()) != kernproc
) && current_thread_aborted() &&
4756 (!(p
->p_acflag
& AXSIG
) || (p
->exit_thread
!= current_thread()) ||
4757 (p
->p_sigacts
== NULL
) ||
4758 (p
->p_sigacts
->ps_sig
< 1) || (p
->p_sigacts
->ps_sig
> NSIG
) ||
4759 !(sigprop
[p
->p_sigacts
->ps_sig
] & SA_CORE
)))
4762 /* mask off thread and process blocked signals. */
4763 if (NMFLAG(nmp
, INTR
) && ((p
= get_bsdthreadtask_info(thd
))) &&
4764 proc_pendingsignals(p
, NFSINT_SIGMASK
))
4770 * Lock a socket against others.
4771 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
4772 * and also to avoid race conditions between the processes with nfs requests
4773 * in progress when a reconnect is necessary.
4776 nfs_sndlock(struct nfsreq
*req
)
4778 struct nfsmount
*nmp
= req
->r_nmp
;
4780 int error
= 0, slpflag
= 0;
4781 struct timespec ts
= { 0, 0 };
4786 lck_mtx_lock(&nmp
->nm_lock
);
4787 statep
= &nmp
->nm_state
;
4789 if (NMFLAG(nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
))
4791 while (*statep
& NFSSTA_SNDLOCK
) {
4792 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 1)))
4794 *statep
|= NFSSTA_WANTSND
;
4795 if (nfs_noremotehang(req
->r_thread
))
4797 msleep(statep
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsndlck", &ts
);
4798 if (slpflag
== PCATCH
) {
4804 *statep
|= NFSSTA_SNDLOCK
;
4805 lck_mtx_unlock(&nmp
->nm_lock
);
4810 * Unlock the stream socket for others.
4813 nfs_sndunlock(struct nfsreq
*req
)
4815 struct nfsmount
*nmp
= req
->r_nmp
;
4816 int *statep
, wake
= 0;
4820 lck_mtx_lock(&nmp
->nm_lock
);
4821 statep
= &nmp
->nm_state
;
4822 if ((*statep
& NFSSTA_SNDLOCK
) == 0)
4823 panic("nfs sndunlock");
4824 *statep
&= ~(NFSSTA_SNDLOCK
|NFSSTA_SENDING
);
4825 if (*statep
& NFSSTA_WANTSND
) {
4826 *statep
&= ~NFSSTA_WANTSND
;
4829 lck_mtx_unlock(&nmp
->nm_lock
);
4836 struct nfsmount
*nmp
,
4838 struct sockaddr
*saddr
,
4845 struct nfsm_chain
*nmrep
)
4847 int error
= 0, on
= 1, try, sendat
= 2, soproto
, recv
, optlen
, restoreto
= 0;
4848 socket_t newso
= NULL
;
4849 struct sockaddr_storage ss
;
4850 struct timeval orig_rcvto
, orig_sndto
, tv
= { 1, 0 };
4851 mbuf_t m
, mrep
= NULL
;
4853 uint32_t rxid
= 0, reply
= 0, reply_status
, rejected_status
;
4854 uint32_t verf_type
, verf_len
, accepted_status
;
4855 size_t readlen
, sentlen
;
4856 struct nfs_rpc_record_state nrrs
;
4859 /* create socket and set options */
4860 soproto
= (sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
;
4861 if ((error
= sock_socket(saddr
->sa_family
, sotype
, soproto
, NULL
, NULL
, &newso
)))
4865 int level
= (saddr
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
4866 int optname
= (saddr
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
4867 int portrange
= IP_PORTRANGE_LOW
;
4868 error
= sock_setsockopt(newso
, level
, optname
, &portrange
, sizeof(portrange
));
4870 ss
.ss_len
= saddr
->sa_len
;
4871 ss
.ss_family
= saddr
->sa_family
;
4872 if (ss
.ss_family
== AF_INET
) {
4873 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
4874 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
4875 } else if (ss
.ss_family
== AF_INET6
) {
4876 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
4877 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
4882 error
= sock_bind(newso
, (struct sockaddr
*)&ss
);
4886 if (sotype
== SOCK_STREAM
) {
4887 on
= 4; /* don't wait too long for the socket to connect */
4888 sock_setsockopt(newso
, IPPROTO_TCP
, TCP_CONNECTIONTIMEOUT
, &on
, sizeof(on
));
4889 error
= sock_connect(newso
, saddr
, 0);
4892 if (((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
)))) ||
4893 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
)))) ||
4894 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
)))))
4898 /* make sure socket is using a one second timeout in this function */
4899 optlen
= sizeof(orig_rcvto
);
4900 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, &optlen
);
4902 optlen
= sizeof(orig_sndto
);
4903 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, &optlen
);
4906 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
));
4907 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
));
4912 if (sotype
== SOCK_STREAM
) {
4913 sendat
= 0; /* we only resend the request for UDP */
4914 nfs_rpc_record_state_init(&nrrs
);
4917 for (try=0; try < timeo
; try++) {
4918 if ((error
= nfs_sigintr(nmp
, NULL
, !try ? NULL
: thd
, 0)))
4920 if (!try || (try == sendat
)) {
4921 /* send the request (resending periodically for UDP) */
4922 if ((error
= mbuf_copym(mreq
, 0, MBUF_COPYALL
, MBUF_WAITOK
, &m
)))
4924 bzero(&msg
, sizeof(msg
));
4925 if ((sotype
== SOCK_DGRAM
) && !sock_isconnected(so
)) {
4926 msg
.msg_name
= saddr
;
4927 msg
.msg_namelen
= saddr
->sa_len
;
4929 if ((error
= sock_sendmbuf(so
, &msg
, m
, 0, &sentlen
)))
4935 /* wait for the response */
4936 if (sotype
== SOCK_STREAM
) {
4937 /* try to read (more of) record */
4938 error
= nfs_rpc_record_read(so
, &nrrs
, 0, &recv
, &mrep
);
4939 /* if we don't have the whole record yet, we'll keep trying */
4942 bzero(&msg
, sizeof(msg
));
4943 error
= sock_receivembuf(so
, &msg
, &mrep
, 0, &readlen
);
4945 if (error
== EWOULDBLOCK
)
4948 /* parse the response */
4949 nfsm_chain_dissect_init(error
, nmrep
, mrep
);
4950 nfsm_chain_get_32(error
, nmrep
, rxid
);
4951 nfsm_chain_get_32(error
, nmrep
, reply
);
4953 if ((rxid
!= xid
) || (reply
!= RPC_REPLY
))
4955 nfsm_chain_get_32(error
, nmrep
, reply_status
);
4957 if (reply_status
== RPC_MSGDENIED
) {
4958 nfsm_chain_get_32(error
, nmrep
, rejected_status
);
4960 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
4963 nfsm_chain_get_32(error
, nmrep
, verf_type
); /* verifier flavor */
4964 nfsm_chain_get_32(error
, nmrep
, verf_len
); /* verifier length */
4967 nfsm_chain_adv(error
, nmrep
, nfsm_rndup(verf_len
));
4968 nfsm_chain_get_32(error
, nmrep
, accepted_status
);
4970 switch (accepted_status
) {
4974 case RPC_PROGUNAVAIL
:
4975 error
= EPROGUNAVAIL
;
4977 case RPC_PROGMISMATCH
:
4978 error
= EPROGMISMATCH
;
4980 case RPC_PROCUNAVAIL
:
4981 error
= EPROCUNAVAIL
;
4986 case RPC_SYSTEM_ERR
:
4995 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, sizeof(tv
));
4996 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, sizeof(tv
));
4999 sock_shutdown(newso
, SHUT_RDWR
);
5008 struct nfsmount
*nmp
,
5010 struct sockaddr
*sa
,
5017 thread_t thd
= vfs_context_thread(ctx
);
5018 kauth_cred_t cred
= vfs_context_ucred(ctx
);
5019 struct sockaddr_storage ss
;
5020 struct sockaddr
*saddr
= (struct sockaddr
*)&ss
;
5021 struct nfsm_chain nmreq
, nmrep
;
5023 int error
= 0, ip
, pmprog
, pmvers
, pmproc
, ualen
= 0;
5026 char uaddr
[MAX_IPv6_STR_LEN
+16];
5028 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5029 if (saddr
->sa_family
== AF_INET
) {
5033 pmproc
= PMAPPROC_GETPORT
;
5034 } else if (saddr
->sa_family
== AF_INET6
) {
5038 pmproc
= RPCBPROC_GETVERSADDR
;
5042 nfsm_chain_null(&nmreq
);
5043 nfsm_chain_null(&nmrep
);
5046 /* send portmapper request to get port/uaddr */
5048 ((struct sockaddr_in
*)saddr
)->sin_port
= htons(PMAPPORT
);
5050 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(PMAPPORT
);
5051 nfsm_chain_build_alloc_init(error
, &nmreq
, 8*NFSX_UNSIGNED
);
5052 nfsm_chain_add_32(error
, &nmreq
, protocol
);
5053 nfsm_chain_add_32(error
, &nmreq
, vers
);
5055 nfsm_chain_add_32(error
, &nmreq
, ipproto
);
5056 nfsm_chain_add_32(error
, &nmreq
, 0);
5058 if (ipproto
== IPPROTO_TCP
)
5059 nfsm_chain_add_string(error
, &nmreq
, "tcp6", 4);
5061 nfsm_chain_add_string(error
, &nmreq
, "udp6", 4);
5062 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* uaddr */
5063 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* owner */
5065 nfsm_chain_build_done(error
, &nmreq
);
5067 error
= nfsm_rpchead2(nmp
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5068 pmprog
, pmvers
, pmproc
, RPCAUTH_SYS
, cred
, NULL
, nmreq
.nmc_mhead
,
5071 nmreq
.nmc_mhead
= NULL
;
5072 error
= nfs_aux_request(nmp
, thd
, saddr
, so
, (ipproto
== IPPROTO_UDP
) ? SOCK_DGRAM
: SOCK_STREAM
,
5073 mreq
, R_XID32(xid
), 0, timeo
, &nmrep
);
5075 /* grab port from portmap response */
5077 nfsm_chain_get_32(error
, &nmrep
, port
);
5079 ((struct sockaddr_in
*)sa
)->sin_port
= htons(port
);
5081 /* get uaddr string and convert to sockaddr */
5082 nfsm_chain_get_32(error
, &nmrep
, ualen
);
5084 if (ualen
> ((int)sizeof(uaddr
)-1))
5087 /* program is not available, just return a zero port */
5088 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5089 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(0);
5091 nfsm_chain_get_opaque(error
, &nmrep
, ualen
, uaddr
);
5093 uaddr
[ualen
] = '\0';
5094 if (!nfs_uaddr2sockaddr(uaddr
, saddr
))
5099 if ((error
== EPROGMISMATCH
) || (error
== EPROCUNAVAIL
) || (error
== EIO
) || (error
== EBADRPC
)) {
5100 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
5101 if (pmvers
== RPCBVERS4
) {
5102 /* fall back to v3 and GETADDR */
5104 pmproc
= RPCBPROC_GETADDR
;
5105 nfsm_chain_cleanup(&nmreq
);
5106 nfsm_chain_cleanup(&nmrep
);
5107 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5114 bcopy(saddr
, sa
, min(saddr
->sa_len
, sa
->sa_len
));
5117 nfsm_chain_cleanup(&nmreq
);
5118 nfsm_chain_cleanup(&nmrep
);
5123 nfs_msg(thread_t thd
,
5128 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5132 tpr
= tprintf_open(p
);
5136 tprintf(tpr
, "nfs server %s: %s, error %d\n", server
, msg
, error
);
5138 tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);
5144 nfs_down(struct nfsmount
*nmp
, thread_t thd
, int error
, int flags
, const char *msg
)
5146 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
5147 uint32_t do_vfs_signal
;
5153 lck_mtx_lock(&nmp
->nm_lock
);
5155 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5156 if (NMFLAG(nmp
, MUTEJUKEBOX
)) /* jukebox timeouts don't count as unresponsive if muted */
5157 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
5158 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
5160 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5161 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
5163 if ((flags
& NFSSTA_TIMEO
) && !(nmp
->nm_state
& NFSSTA_TIMEO
))
5164 nmp
->nm_state
|= NFSSTA_TIMEO
;
5165 if ((flags
& NFSSTA_LOCKTIMEO
) && !(nmp
->nm_state
& NFSSTA_LOCKTIMEO
))
5166 nmp
->nm_state
|= NFSSTA_LOCKTIMEO
;
5167 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && !(nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
))
5168 nmp
->nm_state
|= NFSSTA_JUKEBOXTIMEO
;
5170 unresponsive
= (nmp
->nm_state
& timeoutmask
);
5172 if (unresponsive
&& (nmp
->nm_deadtimeout
> 0)) {
5174 if (!wasunresponsive
) {
5175 nmp
->nm_deadto_start
= now
.tv_sec
;
5176 nfs_mount_sock_thread_wake(nmp
);
5177 } else if ((now
.tv_sec
- nmp
->nm_deadto_start
) > nmp
->nm_deadtimeout
) {
5178 if (!(nmp
->nm_state
& NFSSTA_DEAD
))
5179 printf("nfs server %s: dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
5180 nmp
->nm_state
|= NFSSTA_DEAD
;
5183 lck_mtx_unlock(&nmp
->nm_lock
);
5185 if (nmp
->nm_state
& NFSSTA_DEAD
)
5186 do_vfs_signal
= VQ_DEAD
;
5187 else if (softnobrowse
|| wasunresponsive
|| !unresponsive
)
5190 do_vfs_signal
= VQ_NOTRESP
;
5192 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, do_vfs_signal
, 0);
5194 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, error
);
5198 nfs_up(struct nfsmount
*nmp
, thread_t thd
, int flags
, const char *msg
)
5200 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
5207 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, 0);
5209 lck_mtx_lock(&nmp
->nm_lock
);
5211 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
5212 if (NMFLAG(nmp
, MUTEJUKEBOX
)) /* jukebox timeouts don't count as unresponsive if muted */
5213 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
5214 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
5216 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5217 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
5219 if ((flags
& NFSSTA_TIMEO
) && (nmp
->nm_state
& NFSSTA_TIMEO
))
5220 nmp
->nm_state
&= ~NFSSTA_TIMEO
;
5221 if ((flags
& NFSSTA_LOCKTIMEO
) && (nmp
->nm_state
& NFSSTA_LOCKTIMEO
))
5222 nmp
->nm_state
&= ~NFSSTA_LOCKTIMEO
;
5223 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && (nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
))
5224 nmp
->nm_state
&= ~NFSSTA_JUKEBOXTIMEO
;
5226 unresponsive
= (nmp
->nm_state
& timeoutmask
);
5228 if (nmp
->nm_deadto_start
)
5229 nmp
->nm_deadto_start
= 0;
5230 lck_mtx_unlock(&nmp
->nm_lock
);
5235 do_vfs_signal
= (wasunresponsive
&& !unresponsive
);
5237 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_NOTRESP
, 1);
5241 #endif /* NFSCLIENT */
5246 * Generate the rpc reply header
5247 * siz arg. is used to decide if adding a cluster is worthwhile
5251 struct nfsrv_descript
*nd
,
5252 __unused
struct nfsrv_sock
*slp
,
5253 struct nfsm_chain
*nmrepp
,
5258 struct nfsm_chain nmrep
;
5261 err
= nd
->nd_repstat
;
5262 if (err
&& (nd
->nd_vers
== NFS_VER2
))
5266 * If this is a big reply, use a cluster else
5267 * try and leave leading space for the lower level headers.
5269 siz
+= RPC_REPLYSIZ
;
5270 if (siz
>= nfs_mbuf_minclsize
) {
5271 error
= mbuf_getpacket(MBUF_WAITOK
, &mrep
);
5273 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mrep
);
5276 /* unable to allocate packet */
5277 /* XXX should we keep statistics for these errors? */
5280 if (siz
< nfs_mbuf_minclsize
) {
5281 /* leave space for lower level headers */
5282 tl
= mbuf_data(mrep
);
5283 tl
+= 80/sizeof(*tl
); /* XXX max_hdr? XXX */
5284 mbuf_setdata(mrep
, tl
, 6 * NFSX_UNSIGNED
);
5286 nfsm_chain_init(&nmrep
, mrep
);
5287 nfsm_chain_add_32(error
, &nmrep
, nd
->nd_retxid
);
5288 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
5289 if (err
== ERPCMISMATCH
|| (err
& NFSERR_AUTHERR
)) {
5290 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
5291 if (err
& NFSERR_AUTHERR
) {
5292 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
5293 nfsm_chain_add_32(error
, &nmrep
, (err
& ~NFSERR_AUTHERR
));
5295 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
5296 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
5297 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
5301 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
5302 if (nd
->nd_gss_context
!= NULL
) {
5303 /* RPCSEC_GSS verifier */
5304 error
= nfs_gss_svc_verf_put(nd
, &nmrep
);
5306 nfsm_chain_add_32(error
, &nmrep
, RPC_SYSTEM_ERR
);
5310 /* RPCAUTH_NULL verifier */
5311 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
5312 nfsm_chain_add_32(error
, &nmrep
, 0);
5314 /* accepted status */
5317 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
5320 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
5321 /* XXX hard coded versions? */
5322 nfsm_chain_add_32(error
, &nmrep
, NFS_VER2
);
5323 nfsm_chain_add_32(error
, &nmrep
, NFS_VER3
);
5326 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
5329 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
5332 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
5333 if (nd
->nd_gss_context
!= NULL
)
5334 error
= nfs_gss_svc_prepare_reply(nd
, &nmrep
);
5335 if (err
!= NFSERR_RETVOID
)
5336 nfsm_chain_add_32(error
, &nmrep
,
5337 (err
? nfsrv_errmap(nd
, err
) : 0));
5343 nfsm_chain_build_done(error
, &nmrep
);
5345 /* error composing reply header */
5346 /* XXX should we keep statistics for these errors? */
5352 if ((err
!= 0) && (err
!= NFSERR_RETVOID
))
5353 OSAddAtomic(1, &nfsstats
.srvrpc_errs
);
5358 * The nfs server send routine.
5360 * - return EINTR or ERESTART if interrupted by a signal
5361 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
5362 * - do any cleanup required by recoverable socket errors (???)
5365 nfsrv_send(struct nfsrv_sock
*slp
, mbuf_t nam
, mbuf_t top
)
5368 socket_t so
= slp
->ns_so
;
5369 struct sockaddr
*sendnam
;
5372 bzero(&msg
, sizeof(msg
));
5373 if (nam
&& !sock_isconnected(so
) && (slp
->ns_sotype
!= SOCK_STREAM
)) {
5374 if ((sendnam
= mbuf_data(nam
))) {
5375 msg
.msg_name
= (caddr_t
)sendnam
;
5376 msg
.msg_namelen
= sendnam
->sa_len
;
5379 error
= sock_sendmbuf(so
, &msg
, top
, 0, NULL
);
5382 log(LOG_INFO
, "nfsd send error %d\n", error
);
5384 if ((error
== EWOULDBLOCK
) && (slp
->ns_sotype
== SOCK_STREAM
))
5385 error
= EPIPE
; /* zap TCP sockets if they time out on send */
5387 /* Handle any recoverable (soft) socket errors here. (???) */
5388 if (error
!= EINTR
&& error
!= ERESTART
&& error
!= EIO
&&
5389 error
!= EWOULDBLOCK
&& error
!= EPIPE
)
5396 * Socket upcall routine for the nfsd sockets.
5397 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
5398 * Essentially do as much as possible non-blocking, else punt and it will
5399 * be called with MBUF_WAITOK from an nfsd.
5402 nfsrv_rcv(socket_t so
, void *arg
, int waitflag
)
5404 struct nfsrv_sock
*slp
= arg
;
5406 if (!nfsd_thread_count
|| !(slp
->ns_flag
& SLP_VALID
))
5409 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
5410 nfsrv_rcv_locked(so
, slp
, waitflag
);
5411 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
5414 nfsrv_rcv_locked(socket_t so
, struct nfsrv_sock
*slp
, int waitflag
)
5416 mbuf_t m
, mp
, mhck
, m2
;
5417 int ns_flag
=0, error
;
5421 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
5422 if (waitflag
== MBUF_DONTWAIT
)
5423 lck_rw_done(&slp
->ns_rwlock
);
5429 * Define this to test for nfsds handling this under heavy load.
5431 if (waitflag
== MBUF_DONTWAIT
) {
5432 ns_flag
= SLP_NEEDQ
;
5436 if (slp
->ns_sotype
== SOCK_STREAM
) {
5438 * If there are already records on the queue, defer soreceive()
5439 * to an(other) nfsd so that there is feedback to the TCP layer that
5440 * the nfs servers are heavily loaded.
5443 ns_flag
= SLP_NEEDQ
;
5450 bytes_read
= 1000000000;
5451 error
= sock_receivembuf(so
, NULL
, &mp
, MSG_DONTWAIT
, &bytes_read
);
5452 if (error
|| mp
== NULL
) {
5453 if (error
== EWOULDBLOCK
)
5454 ns_flag
= (waitflag
== MBUF_DONTWAIT
) ? SLP_NEEDQ
: 0;
5456 ns_flag
= SLP_DISCONN
;
5460 if (slp
->ns_rawend
) {
5461 if ((error
= mbuf_setnext(slp
->ns_rawend
, m
)))
5462 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error
);
5463 slp
->ns_cc
+= bytes_read
;
5466 slp
->ns_cc
= bytes_read
;
5468 while ((m2
= mbuf_next(m
)))
5473 * Now try and parse record(s) out of the raw stream data.
5475 error
= nfsrv_getstream(slp
, waitflag
);
5478 ns_flag
= SLP_DISCONN
;
5480 ns_flag
= SLP_NEEDQ
;
5483 struct sockaddr_storage nam
;
5485 if (slp
->ns_reccnt
>= nfsrv_sock_max_rec_queue_length
) {
5486 /* already have max # RPC records queued on this socket */
5487 ns_flag
= SLP_NEEDQ
;
5491 bzero(&msg
, sizeof(msg
));
5492 msg
.msg_name
= (caddr_t
)&nam
;
5493 msg
.msg_namelen
= sizeof(nam
);
5496 bytes_read
= 1000000000;
5497 error
= sock_receivembuf(so
, &msg
, &mp
, MSG_DONTWAIT
| MSG_NEEDSA
, &bytes_read
);
5499 if (msg
.msg_name
&& (mbuf_get(MBUF_WAITOK
, MBUF_TYPE_SONAME
, &mhck
) == 0)) {
5500 mbuf_setlen(mhck
, nam
.ss_len
);
5501 bcopy(&nam
, mbuf_data(mhck
), nam
.ss_len
);
5503 if (mbuf_setnext(m
, mp
)) {
5504 /* trouble... just drop it */
5505 printf("nfsrv_rcv: mbuf_setnext failed\n");
5513 mbuf_setnextpkt(slp
->ns_recend
, m
);
5516 slp
->ns_flag
|= SLP_DOREC
;
5519 mbuf_setnextpkt(m
, NULL
);
5526 * Now try and process the request records, non-blocking.
5530 slp
->ns_flag
|= ns_flag
;
5531 if (waitflag
== MBUF_DONTWAIT
) {
5532 int wake
= (slp
->ns_flag
& SLP_WORKTODO
);
5533 lck_rw_done(&slp
->ns_rwlock
);
5534 if (wake
&& nfsd_thread_count
) {
5535 lck_mtx_lock(nfsd_mutex
);
5536 nfsrv_wakenfsd(slp
);
5537 lck_mtx_unlock(nfsd_mutex
);
5543 * Try and extract an RPC request from the mbuf data list received on a
5544 * stream socket. The "waitflag" argument indicates whether or not it
5548 nfsrv_getstream(struct nfsrv_sock
*slp
, int waitflag
)
5551 char *cp1
, *cp2
, *mdata
;
5552 int len
, mlen
, error
;
5553 mbuf_t om
, m2
, recm
;
5556 if (slp
->ns_flag
& SLP_GETSTREAM
)
5557 panic("nfs getstream");
5558 slp
->ns_flag
|= SLP_GETSTREAM
;
5560 if (slp
->ns_reclen
== 0) {
5561 if (slp
->ns_cc
< NFSX_UNSIGNED
) {
5562 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5566 mdata
= mbuf_data(m
);
5568 if (mlen
>= NFSX_UNSIGNED
) {
5569 bcopy(mdata
, (caddr_t
)&recmark
, NFSX_UNSIGNED
);
5570 mdata
+= NFSX_UNSIGNED
;
5571 mlen
-= NFSX_UNSIGNED
;
5572 mbuf_setdata(m
, mdata
, mlen
);
5574 cp1
= (caddr_t
)&recmark
;
5576 while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
5584 mbuf_setdata(m
, cp2
, mlen
);
5587 slp
->ns_cc
-= NFSX_UNSIGNED
;
5588 recmark
= ntohl(recmark
);
5589 slp
->ns_reclen
= recmark
& ~0x80000000;
5590 if (recmark
& 0x80000000)
5591 slp
->ns_flag
|= SLP_LASTFRAG
;
5593 slp
->ns_flag
&= ~SLP_LASTFRAG
;
5594 if (slp
->ns_reclen
<= 0 || slp
->ns_reclen
> NFS_MAXPACKET
) {
5595 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5601 * Now get the record part.
5603 * Note that slp->ns_reclen may be 0. Linux sometimes
5604 * generates 0-length RPCs
5607 if (slp
->ns_cc
== slp
->ns_reclen
) {
5609 slp
->ns_raw
= slp
->ns_rawend
= NULL
;
5610 slp
->ns_cc
= slp
->ns_reclen
= 0;
5611 } else if (slp
->ns_cc
> slp
->ns_reclen
) {
5615 mdata
= mbuf_data(m
);
5617 while (len
< slp
->ns_reclen
) {
5618 if ((len
+ mlen
) > slp
->ns_reclen
) {
5619 if (mbuf_copym(m
, 0, slp
->ns_reclen
- len
, waitflag
, &m2
)) {
5620 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5621 return (EWOULDBLOCK
);
5624 if (mbuf_setnext(om
, m2
)) {
5625 /* trouble... just drop it */
5626 printf("nfsrv_getstream: mbuf_setnext failed\n");
5628 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5629 return (EWOULDBLOCK
);
5635 mdata
+= slp
->ns_reclen
- len
;
5636 mlen
-= slp
->ns_reclen
- len
;
5637 mbuf_setdata(m
, mdata
, mlen
);
5638 len
= slp
->ns_reclen
;
5639 } else if ((len
+ mlen
) == slp
->ns_reclen
) {
5644 if (mbuf_setnext(om
, NULL
)) {
5645 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
5646 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5647 return (EWOULDBLOCK
);
5650 mdata
= mbuf_data(m
);
5656 mdata
= mbuf_data(m
);
5663 slp
->ns_flag
&= ~SLP_GETSTREAM
;
5668 * Accumulate the fragments into a record.
5670 if (slp
->ns_frag
== NULL
) {
5671 slp
->ns_frag
= recm
;
5674 while ((m2
= mbuf_next(m
)))
5676 if ((error
= mbuf_setnext(m
, recm
)))
5677 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error
);
5679 if (slp
->ns_flag
& SLP_LASTFRAG
) {
5681 mbuf_setnextpkt(slp
->ns_recend
, slp
->ns_frag
);
5683 slp
->ns_rec
= slp
->ns_frag
;
5684 slp
->ns_flag
|= SLP_DOREC
;
5686 slp
->ns_recend
= slp
->ns_frag
;
5687 slp
->ns_frag
= NULL
;
5693 * Parse an RPC header.
5697 struct nfsrv_sock
*slp
,
5699 struct nfsrv_descript
**ndp
)
5703 struct nfsrv_descript
*nd
;
5707 if (!(slp
->ns_flag
& (SLP_VALID
|SLP_DOREC
)) || (slp
->ns_rec
== NULL
))
5709 MALLOC_ZONE(nd
, struct nfsrv_descript
*,
5710 sizeof (struct nfsrv_descript
), M_NFSRVDESC
, M_WAITOK
);
5714 slp
->ns_rec
= mbuf_nextpkt(m
);
5716 mbuf_setnextpkt(m
, NULL
);
5718 slp
->ns_flag
&= ~SLP_DOREC
;
5719 slp
->ns_recend
= NULL
;
5722 if (mbuf_type(m
) == MBUF_TYPE_SONAME
) {
5725 if ((error
= mbuf_setnext(nam
, NULL
)))
5726 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error
);
5730 nfsm_chain_dissect_init(error
, &nd
->nd_nmreq
, m
);
5732 error
= nfsrv_getreq(nd
);
5736 if (nd
->nd_gss_context
)
5737 nfs_gss_svc_ctx_deref(nd
->nd_gss_context
);
5738 FREE_ZONE(nd
, sizeof(*nd
), M_NFSRVDESC
);
5748 * Parse an RPC request
5750 * - fill in the cred struct.
/*
 * NOTE(review): lossy extraction — original lines are fragmented and several
 * (braces, goto-nfsmout error exits, declarations such as error/val/len/
 * user_id/group_id/ngroups/i) are missing. Code preserved byte-for-byte;
 * comments only.
 *
 * Reassembled: parses the ONC RPC call header and credential from the
 * descriptor's nfsm_chain (nd->nd_nmreq), filling in nd_retxid, nd_vers,
 * nd_procnum, nd_sec and nd_cr. Header-level mismatches are reported by
 * setting nd_repstat and downgrading the request to NFSPROC_NOOP rather
 * than dropping it.
 */
5753 nfsrv_getreq(struct nfsrv_descript
*nd
)
5755 struct nfsm_chain
*nmreq
;
5757 u_int32_t nfsvers
, auth_type
;
/* Start with no GSS state on the descriptor. */
5765 nd
->nd_gss_context
= NULL
;
5766 nd
->nd_gss_seqnum
= 0;
5767 nd
->nd_gss_mb
= NULL
;
/* Default credential ids: -2 (nobody) until AUTH_SYS parsing replaces them. */
5769 user_id
= group_id
= -2;
5770 val
= auth_type
= len
= 0;
/* Dissect the RPC call header from the request chain. */
5772 nmreq
= &nd
->nd_nmreq
;
5773 nfsm_chain_get_32(error
, nmreq
, nd
->nd_retxid
); // XID
5774 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Call
/* Must be an RPC CALL message; the error exit for this test was dropped by extraction. */
5775 if (!error
&& (val
!= RPC_CALL
))
5779 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Version
/* Only RPC version 2 is supported; anything else gets ERPCMISMATCH + NOOP. */
5781 if (val
!= RPC_VER2
) {
5782 nd
->nd_repstat
= ERPCMISMATCH
;
5783 nd
->nd_procnum
= NFSPROC_NOOP
;
5786 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Program Number
/* Program must be NFS; otherwise EPROGUNAVAIL + NOOP. */
5788 if (val
!= NFS_PROG
) {
5789 nd
->nd_repstat
= EPROGUNAVAIL
;
5790 nd
->nd_procnum
= NFSPROC_NOOP
;
5793 nfsm_chain_get_32(error
, nmreq
, nfsvers
);// NFS Version Number
/* Accept only NFS v2 and v3; otherwise EPROGMISMATCH + NOOP. */
5795 if ((nfsvers
< NFS_VER2
) || (nfsvers
> NFS_VER3
)) {
5796 nd
->nd_repstat
= EPROGMISMATCH
;
5797 nd
->nd_procnum
= NFSPROC_NOOP
;
5800 nd
->nd_vers
= nfsvers
;
5801 nfsm_chain_get_32(error
, nmreq
, nd
->nd_procnum
);// NFS Procedure Number
/* Range-check the procedure number (v2 tops out at STATFS); otherwise EPROCUNAVAIL + NOOP. */
5803 if ((nd
->nd_procnum
>= NFS_NPROCS
) ||
5804 ((nd
->nd_vers
== NFS_VER2
) && (nd
->nd_procnum
> NFSV2PROC_STATFS
))) {
5805 nd
->nd_repstat
= EPROCUNAVAIL
;
5806 nd
->nd_procnum
= NFSPROC_NOOP
;
/* Map v2 procedure numbers onto the v3 procedure space via nfsv3_procid[]. */
5809 if (nfsvers
!= NFS_VER3
)
5810 nd
->nd_procnum
= nfsv3_procid
[nd
->nd_procnum
];
/* Credential: flavor plus opaque body length, bounded by RPCAUTH_MAXSIZ. */
5811 nfsm_chain_get_32(error
, nmreq
, auth_type
); // Auth Flavor
5812 nfsm_chain_get_32(error
, nmreq
, len
); // Auth Length
5813 if (!error
&& (len
< 0 || len
> RPCAUTH_MAXSIZ
))
5817 /* Handle authentication */
/*
 * AUTH_SYS (a.k.a. AUTH_UNIX): decode stamp/hostname/uid/gid/gid-list from
 * the wire into a temporary posix_cred, then mint a real kauth credential.
 */
5818 if (auth_type
== RPCAUTH_SYS
) {
5819 struct posix_cred temp_pcred
;
/* NULLPROC needs no credential — the skip for this case was dropped by extraction. */
5820 if (nd
->nd_procnum
== NFSPROC_NULL
)
5822 nd
->nd_sec
= RPCAUTH_SYS
;
5823 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // skip stamp
5824 nfsm_chain_get_32(error
, nmreq
, len
); // hostname length
5825 if (len
< 0 || len
> NFS_MAXNAMLEN
)
5827 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
)); // skip hostname
5830 /* create a temporary credential using the bits from the wire */
5831 bzero(&temp_pcred
, sizeof(temp_pcred
));
5832 nfsm_chain_get_32(error
, nmreq
, user_id
);
5833 nfsm_chain_get_32(error
, nmreq
, group_id
);
/* Primary gid occupies cr_groups[0]; supplementary gids follow from index 1. */
5834 temp_pcred
.cr_groups
[0] = group_id
;
5835 nfsm_chain_get_32(error
, nmreq
, len
); // extra GID count
/* Bound the supplementary gid count by the RPC AUTH_SYS limit. */
5836 if ((len
< 0) || (len
> RPCAUTH_UNIXGIDS
))
/*
 * Read each extra gid. NOTE(review): the loop bound "i <= len" with the
 * in-loop guard presumably capped at NGROUPS was dropped by extraction —
 * verify against the original that cr_groups[] cannot be overrun here.
 */
5839 for (i
= 1; i
<= len
; i
++)
5841 nfsm_chain_get_32(error
, nmreq
, temp_pcred
.cr_groups
[i
]);
5843 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
);
/* Total group count: primary + extras, clamped to NGROUPS. */
5845 ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (len
+ 1);
/* Sort the group list (enclosing "if (ngroups > 1)" dropped by extraction). */
5847 nfsrv_group_sort(&temp_pcred
.cr_groups
[0], ngroups
);
/* Skip the RPC verifier (flavor + bounded opaque body). */
5848 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
5849 nfsm_chain_get_32(error
, nmreq
, len
); // verifier length
5850 if (len
< 0 || len
> RPCAUTH_MAXSIZ
)
5853 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
));
5855 /* request creation of a real credential */
5856 temp_pcred
.cr_uid
= user_id
;
5857 temp_pcred
.cr_ngroups
= ngroups
;
5858 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
/* Credential allocation failure: answer with ENOMEM and neuter the request. */
5859 if (nd
->nd_cr
== NULL
) {
5860 nd
->nd_repstat
= ENOMEM
;
5861 nd
->nd_procnum
= NFSPROC_NOOP
;
/*
 * RPCSEC_GSS: delegate credential/context handling. EINVAL means the request
 * must be dropped outright; other errors are reported via nd_repstat + NOOP.
 */
5864 } else if (auth_type
== RPCSEC_GSS
) {
5865 error
= nfs_gss_svc_cred_get(nd
, nmreq
);
5867 if (error
== EINVAL
)
5868 goto nfsmout
; // drop the request
5869 nd
->nd_repstat
= error
;
5870 nd
->nd_procnum
= NFSPROC_NOOP
;
/* Any other flavor: only NULLPROC is tolerated; everything else is rejected. */
5874 if (nd
->nd_procnum
== NFSPROC_NULL
) // assume it's AUTH_NONE
5876 nd
->nd_repstat
= (NFSERR_AUTHERR
| AUTH_REJECTCRED
);
5877 nd
->nd_procnum
= NFSPROC_NOOP
;
/*
 * nfsmout error path (label line dropped by extraction): drop any credential
 * reference we took and release the dissection chain.
 */
5882 if (IS_VALID_CRED(nd
->nd_cr
))
5883 kauth_cred_unref(&nd
->nd_cr
);
5884 nfsm_chain_cleanup(nmreq
);
5889 * Search for a sleeping nfsd and wake it up.
5890 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
5891 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
5892 * Note: Must be called with nfsd_mutex held.
/*
 * NOTE(review): lossy extraction — the return type, local declaration of
 * "nd", early returns, and the trailing wakeup of the dequeued nfsd were
 * dropped. Code preserved byte-for-byte; comments only.
 */
5895 nfsrv_wakenfsd(struct nfsrv_sock
*slp
)
/* Ignore sockets that are no longer valid. */
5899 if ((slp
->ns_flag
& SLP_VALID
) == 0)
/* Take the socket's rwlock exclusively while manipulating its queue state. */
5902 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
5903 /* if there's work to do on this socket, make sure it's queued up */
5904 if ((slp
->ns_flag
& SLP_WORKTODO
) && !(slp
->ns_flag
& SLP_QUEUED
)) {
5905 TAILQ_INSERT_TAIL(&nfsrv_sockwait
, slp
, ns_svcq
);
5906 slp
->ns_flag
|= SLP_WAITQ
;
5908 lck_rw_done(&slp
->ns_rwlock
);
5910 /* wake up a waiting nfsd, if possible */
5911 nd
= TAILQ_FIRST(&nfsd_queue
);
/*
 * Dequeue the first idle nfsd and clear its waiting flag (the NULL check and
 * the subsequent wakeup() call were dropped by the extraction — confirm
 * against the original file).
 */
5915 TAILQ_REMOVE(&nfsd_queue
, nd
, nfsd_queue
);
5916 nd
->nfsd_flag
&= ~NFSD_WAITING
;
5920 #endif /* NFSSERVER */