2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
68 #include <nfs/nfs_conf.h>
72 * Socket operations for use by nfs
75 #include <sys/param.h>
76 #include <sys/systm.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
94 #include <kern/clock.h>
95 #include <kern/task.h>
96 #include <kern/thread.h>
97 #include <kern/thread_call.h>
101 #include <netinet/in.h>
102 #include <netinet/tcp.h>
104 #include <nfs/rpcv2.h>
105 #include <nfs/krpc.h>
106 #include <nfs/nfsproto.h>
108 #include <nfs/xdr_subs.h>
109 #include <nfs/nfsm_subs.h>
110 #include <nfs/nfs_gss.h>
111 #include <nfs/nfsmount.h>
112 #include <nfs/nfsnode.h>
/* Debug logging for the NFS socket facility (level 7); compiled against the NFS debug framework. */
114 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
/* Dump an mbuf chain when verbose (level 15) socket debugging is enabled. */
115 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
/* Kernel-private thread primitives used by this file but not exported in a header we include. */
118 boolean_t
current_thread_aborted(void);
119 kern_return_t
thread_terminate(thread_t
);
/* Zone allocators for the fixed-size NFS structures allocated on hot paths. */
121 ZONE_DECLARE(nfs_fhandle_zone
, "fhandle", sizeof(struct fhandle
), ZC_NONE
);
122 ZONE_DECLARE(nfs_req_zone
, "NFS req", sizeof(struct nfsreq
), ZC_NONE
);
123 ZONE_DECLARE(nfsrv_descript_zone
, "NFSV3 srvdesc",
124 sizeof(struct nfsrv_descript
), ZC_NONE
);
/* Server-side tunable and helpers, compiled only when the NFS server is configured in. */
127 #if CONFIG_NFS_SERVER
128 int nfsrv_sock_max_rec_queue_length
= 128; /* max # RPC records queued on (UDP) socket */
130 int nfsrv_getstream(struct nfsrv_sock
*, int);
131 int nfsrv_getreq(struct nfsrv_descript
*);
132 extern int nfsv3_procid
[NFS_NPROCS
];
133 #endif /* CONFIG_NFS_SERVER */
136 * compare two sockaddr structures
139 nfs_sockaddr_cmp(struct sockaddr
*sa1
, struct sockaddr
*sa2
)
147 if (sa1
->sa_family
!= sa2
->sa_family
) {
148 return (sa1
->sa_family
< sa2
->sa_family
) ? -1 : 1;
150 if (sa1
->sa_len
!= sa2
->sa_len
) {
151 return (sa1
->sa_len
< sa2
->sa_len
) ? -1 : 1;
153 if (sa1
->sa_family
== AF_INET
) {
154 return bcmp(&((struct sockaddr_in
*)sa1
)->sin_addr
,
155 &((struct sockaddr_in
*)sa2
)->sin_addr
, sizeof(((struct sockaddr_in
*)sa1
)->sin_addr
));
157 if (sa1
->sa_family
== AF_INET6
) {
158 return bcmp(&((struct sockaddr_in6
*)sa1
)->sin6_addr
,
159 &((struct sockaddr_in6
*)sa2
)->sin6_addr
, sizeof(((struct sockaddr_in6
*)sa1
)->sin6_addr
));
/* Forward declarations for the client-side connect/search/request machinery below. */
164 #if CONFIG_NFS_CLIENT
166 int nfs_connect_search_new_socket(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
167 int nfs_connect_search_socket_connect(struct nfsmount
*, struct nfs_socket
*, int);
168 int nfs_connect_search_ping(struct nfsmount
*, struct nfs_socket
*, struct timeval
*);
169 void nfs_connect_search_socket_found(struct nfsmount
*, struct nfs_socket_search
*, struct nfs_socket
*);
170 void nfs_connect_search_socket_reap(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
171 int nfs_connect_search_check(struct nfsmount
*, struct nfs_socket_search
*, struct timeval
*);
172 int nfs_reconnect(struct nfsmount
*);
173 int nfs_connect_setup(struct nfsmount
*);
174 void nfs_mount_sock_thread(void *, wait_result_t
);
175 void nfs_udp_rcv(socket_t
, void*, int);
176 void nfs_tcp_rcv(socket_t
, void*, int);
177 void nfs_sock_poke(struct nfsmount
*);
178 void nfs_request_match_reply(struct nfsmount
*, mbuf_t
);
179 void nfs_reqdequeue(struct nfsreq
*);
180 void nfs_reqbusy(struct nfsreq
*);
181 struct nfsreq
*nfs_reqnext(struct nfsreq
*);
182 int nfs_wait_reply(struct nfsreq
*);
183 void nfs_softterm(struct nfsreq
*);
184 int nfs_can_squish(struct nfsmount
*);
185 int nfs_is_squishy(struct nfsmount
*);
186 int nfs_is_dead(int, struct nfsmount
*);
189 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
190 * Use the mean and mean deviation of rtt for the appropriate type of rpc
191 * for the frequent rpcs and a default for the others.
192 * The justification for doing "other" this way is that these rpcs
193 * happen so infrequently that timer est. would probably be stale.
194 * Also, since many of these rpcs are
195 * non-idempotent, a conservative timeout is desired.
196 * getattr, lookup - A+2D
/*
 * NOTE(review): the NFS_RTO macro below is missing an interior line
 * (original line 202, the proct[] condition) in this extracted chunk —
 * restore it from the upstream file before compiling.
 */
200 #define NFS_RTO(n, t) \
201 ((t) == 0 ? (n)->nm_timeo : \
203 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
204 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
/* Smoothed RTT / RTT deviation slots for a request, selected by its procedure's timer class. */
205 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
206 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
209 * Defines which timer to use for the procnum.
/*
 * Timer-class table indexed by NFS procedure number (0 = "other"/default).
 * NOTE(review): several entries (e.g. READ/WRITE/MKDIR/RMDIR/LINK) and the
 * closing "};" are not visible in this extracted chunk; the values here are
 * only the subset that survived extraction — restore the full table from the
 * upstream file.
 */
216 static const int proct
[] = {
218 [NFSPROC_GETATTR
] = 1,
219 [NFSPROC_SETATTR
] = 0,
220 [NFSPROC_LOOKUP
] = 2,
221 [NFSPROC_ACCESS
] = 1,
222 [NFSPROC_READLINK
] = 3,
225 [NFSPROC_CREATE
] = 0,
227 [NFSPROC_SYMLINK
] = 0,
229 [NFSPROC_REMOVE
] = 0,
231 [NFSPROC_RENAME
] = 0,
233 [NFSPROC_READDIR
] = 3,
234 [NFSPROC_READDIRPLUS
] = 3,
235 [NFSPROC_FSSTAT
] = 0,
236 [NFSPROC_FSINFO
] = 0,
237 [NFSPROC_PATHCONF
] = 0,
238 [NFSPROC_COMMIT
] = 0,
243 * There is a congestion window for outstanding rpcs maintained per mount
244 * point. The cwnd size is adjusted in roughly the way that:
245 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
246 * SIGCOMM '88". ACM, August 1988.
247 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
248 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
249 * of rpcs is in progress.
250 * (The sent count and cwnd are scaled for integer arith.)
251 * Variants of "slow start" were tried and were found to be too much of a
252 * performance hit (ave. rtt 3 times larger),
253 * I suspect due to the large rtt that nfs rpcs have.
/* Congestion-window scaling constants and the retransmit backoff multipliers. */
255 #define NFS_CWNDSCALE 256
256 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
257 static int nfs_backoff
[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
260 * Increment location index to next address/server/location.
263 nfs_location_next(struct nfs_fs_locations
*nlp
, struct nfs_location_index
*nlip
)
265 uint8_t loc
= nlip
->nli_loc
;
266 uint8_t serv
= nlip
->nli_serv
;
267 uint8_t addr
= nlip
->nli_addr
;
269 /* move to next address */
271 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
272 /* no more addresses on current server, go to first address of next server */
276 if (serv
>= nlp
->nl_locations
[loc
]->nl_servcount
) {
277 /* no more servers on current location, go to first server of next location */
280 if (loc
>= nlp
->nl_numlocs
) {
281 loc
= 0; /* after last location, wrap back around to first location */
286 * It's possible for this next server to not have any addresses.
287 * Check for that here and go to the next server.
288 * But bail out if we've managed to come back around to the original
289 * location that was passed in. (That would mean no servers had any
290 * addresses. And we don't want to spin here forever.)
292 if ((loc
== nlip
->nli_loc
) && (serv
== nlip
->nli_serv
) && (addr
== nlip
->nli_addr
)) {
295 if (addr
>= nlp
->nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
) {
300 nlip
->nli_serv
= serv
;
301 nlip
->nli_addr
= addr
;
305 * Compare two location indices.
308 nfs_location_index_cmp(struct nfs_location_index
*nlip1
, struct nfs_location_index
*nlip2
)
310 if (nlip1
->nli_loc
!= nlip2
->nli_loc
) {
311 return nlip1
->nli_loc
- nlip2
->nli_loc
;
313 if (nlip1
->nli_serv
!= nlip2
->nli_serv
) {
314 return nlip1
->nli_serv
- nlip2
->nli_serv
;
316 return nlip1
->nli_addr
- nlip2
->nli_addr
;
320 * Get the mntfromname (or path portion only) for a given location.
323 nfs_location_mntfromname(struct nfs_fs_locations
*locs
, struct nfs_location_index idx
, char *s
, size_t size
, int pathonly
)
325 struct nfs_fs_location
*fsl
= locs
->nl_locations
[idx
.nli_loc
];
331 char *name
= fsl
->nl_servers
[idx
.nli_serv
]->ns_name
;
336 if (*fsl
->nl_servers
[idx
.nli_serv
]->ns_addresses
[idx
.nli_addr
]) {
337 name
= fsl
->nl_servers
[idx
.nli_serv
]->ns_addresses
[idx
.nli_addr
];
339 cnt
= scnprintf(p
, size
, "<%s>:", name
);
341 cnt
= scnprintf(p
, size
, "%s:", name
);
346 if (fsl
->nl_path
.np_compcount
== 0) {
347 /* mounting root export on server */
354 /* append each server path component */
355 for (i
= 0; (size
> 0) && (i
< (int)fsl
->nl_path
.np_compcount
); i
++) {
356 cnt
= scnprintf(p
, size
, "/%s", fsl
->nl_path
.np_components
[i
]);
363 * NFS client connect socket upcall.
364 * (Used only during socket connect/search.)
367 nfs_connect_upcall(socket_t so
, void *arg
, __unused
int waitflag
)
369 struct nfs_socket
*nso
= arg
;
372 int error
= 0, recv
= 1;
374 if (nso
->nso_flags
& NSO_CONNECTING
) {
375 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso
, nso
->nso_flags
);
376 wakeup(nso
->nso_wake
);
380 lck_mtx_lock(&nso
->nso_lock
);
381 if ((nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) || !(nso
->nso_flags
& NSO_PINGING
)) {
382 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso
);
383 lck_mtx_unlock(&nso
->nso_lock
);
386 NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso
, nso
->nso_flags
);
387 nso
->nso_flags
|= NSO_UPCALL
;
389 /* loop while we make error-free progress */
390 while (!error
&& recv
) {
391 /* make sure we're still interested in this socket */
392 if (nso
->nso_flags
& (NSO_DISCONNECTING
| NSO_DEAD
)) {
395 lck_mtx_unlock(&nso
->nso_lock
);
397 if (nso
->nso_sotype
== SOCK_STREAM
) {
398 error
= nfs_rpc_record_read(so
, &nso
->nso_rrs
, MSG_DONTWAIT
, &recv
, &m
);
399 NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error
, recv
);
402 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
405 lck_mtx_lock(&nso
->nso_lock
);
407 /* match response with request */
408 struct nfsm_chain nmrep
;
409 uint32_t reply
= 0, rxid
= 0, verf_type
, verf_len
;
410 uint32_t reply_status
, rejected_status
, accepted_status
;
412 NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m
);
413 nfsm_chain_dissect_init(error
, &nmrep
, m
);
414 nfsm_chain_get_32(error
, &nmrep
, rxid
);
415 nfsm_chain_get_32(error
, &nmrep
, reply
);
416 if (!error
&& ((reply
!= RPC_REPLY
) || (rxid
!= nso
->nso_pingxid
))) {
419 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
420 if (!error
&& (reply_status
== RPC_MSGDENIED
)) {
421 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
423 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
426 nfsm_chain_get_32(error
, &nmrep
, verf_type
); /* verifier flavor */
427 nfsm_chain_get_32(error
, &nmrep
, verf_len
); /* verifier length */
430 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
432 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
434 NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status
, nso
->nso_version
);
435 if ((accepted_status
== RPC_PROGMISMATCH
) && !nso
->nso_version
) {
436 uint32_t minvers
, maxvers
;
437 nfsm_chain_get_32(error
, &nmrep
, minvers
);
438 nfsm_chain_get_32(error
, &nmrep
, maxvers
);
440 if (nso
->nso_protocol
== PMAPPROG
) {
441 if ((minvers
> RPCBVERS4
) || (maxvers
< PMAPVERS
)) {
442 error
= EPROGMISMATCH
;
443 } else if ((nso
->nso_saddr
->sa_family
== AF_INET
) &&
444 (PMAPVERS
>= minvers
) && (PMAPVERS
<= maxvers
)) {
445 nso
->nso_version
= PMAPVERS
;
446 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
447 if ((RPCBVERS4
>= minvers
) && (RPCBVERS4
<= maxvers
)) {
448 nso
->nso_version
= RPCBVERS4
;
449 } else if ((RPCBVERS3
>= minvers
) && (RPCBVERS3
<= maxvers
)) {
450 nso
->nso_version
= RPCBVERS3
;
453 } else if (nso
->nso_protocol
== NFS_PROG
) {
457 * N.B. Both portmapper and rpcbind V3 are happy to return
458 * addresses for other versions than the one you ask (getport or
459 * getaddr) and thus we may have fallen to this code path. So if
460 * we get a version that we support, use highest supported
461 * version. This assumes that the server supports all versions
462 * between minvers and maxvers. Note for IPv6 we will try and
463 * use rpcbind V4 which has getversaddr and we should not get
464 * here if that was successful.
466 for (vers
= nso
->nso_nfs_max_vers
; vers
>= (int)nso
->nso_nfs_min_vers
; vers
--) {
467 if (vers
>= (int)minvers
&& vers
<= (int)maxvers
) {
471 nso
->nso_version
= (vers
< (int)nso
->nso_nfs_min_vers
) ? 0 : vers
;
473 if (!error
&& nso
->nso_version
) {
474 accepted_status
= RPC_SUCCESS
;
478 switch (accepted_status
) {
482 case RPC_PROGUNAVAIL
:
483 error
= EPROGUNAVAIL
;
485 case RPC_PROGMISMATCH
:
486 error
= EPROGMISMATCH
;
488 case RPC_PROCUNAVAIL
:
489 error
= EPROCUNAVAIL
;
501 nso
->nso_flags
&= ~NSO_PINGING
;
503 NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
504 nso
->nso_protocol
, nso
->nso_version
, error
);
505 nso
->nso_error
= error
;
506 nso
->nso_flags
|= NSO_DEAD
;
508 nso
->nso_flags
|= NSO_VERIFIED
;
511 /* wake up search thread */
512 wakeup(nso
->nso_wake
);
517 nso
->nso_flags
&= ~NSO_UPCALL
;
518 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
519 /* problems with the socket... */
520 NFS_SOCK_DBG("connect upcall failed %d\n", error
);
521 nso
->nso_error
= error
? error
: EPIPE
;
522 nso
->nso_flags
|= NSO_DEAD
;
523 wakeup(nso
->nso_wake
);
525 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
526 wakeup(&nso
->nso_flags
);
528 lck_mtx_unlock(&nso
->nso_lock
);
532 * Create/initialize an nfs_socket structure.
536 struct nfsmount
*nmp
,
543 struct nfs_socket
**nsop
)
545 struct nfs_socket
*nso
;
548 #define NFS_SOCKET_DEBUGGING
549 #ifdef NFS_SOCKET_DEBUGGING
550 char naddr
[sizeof((struct sockaddr_un
*)0)->sun_path
];
553 switch (sa
->sa_family
) {
556 if (sa
->sa_family
== AF_INET
) {
557 sinaddr
= &((struct sockaddr_in
*)sa
)->sin_addr
;
559 sinaddr
= &((struct sockaddr_in6
*)sa
)->sin6_addr
;
561 if (inet_ntop(sa
->sa_family
, sinaddr
, naddr
, sizeof(naddr
)) != naddr
) {
562 strlcpy(naddr
, "<unknown>", sizeof(naddr
));
566 strlcpy(naddr
, ((struct sockaddr_un
*)sa
)->sun_path
, sizeof(naddr
));
569 strlcpy(naddr
, "<unsupported address family>", sizeof(naddr
));
573 char naddr
[1] = { 0 };
578 /* Create the socket. */
579 MALLOC(nso
, struct nfs_socket
*, sizeof(struct nfs_socket
), M_TEMP
, M_WAITOK
| M_ZERO
);
581 MALLOC(nso
->nso_saddr
, struct sockaddr
*, sa
->sa_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
583 if (!nso
|| !nso
->nso_saddr
) {
589 lck_mtx_init(&nso
->nso_lock
, nfs_request_grp
, LCK_ATTR_NULL
);
590 nso
->nso_sotype
= sotype
;
591 if (nso
->nso_sotype
== SOCK_STREAM
) {
592 nfs_rpc_record_state_init(&nso
->nso_rrs
);
595 nso
->nso_timestamp
= now
.tv_sec
;
596 bcopy(sa
, nso
->nso_saddr
, sa
->sa_len
);
597 switch (sa
->sa_family
) {
600 if (sa
->sa_family
== AF_INET
) {
601 ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
602 } else if (sa
->sa_family
== AF_INET6
) {
603 ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
609 nso
->nso_protocol
= protocol
;
610 nso
->nso_version
= vers
;
611 nso
->nso_nfs_min_vers
= PVER2MAJOR(nmp
->nm_min_vers
);
612 nso
->nso_nfs_max_vers
= PVER2MAJOR(nmp
->nm_max_vers
);
614 error
= sock_socket(sa
->sa_family
, nso
->nso_sotype
, 0, NULL
, NULL
, &nso
->nso_so
);
616 /* Some servers require that the client port be a reserved port number. */
617 if (!error
&& resvport
&& ((sa
->sa_family
== AF_INET
) || (sa
->sa_family
== AF_INET6
))) {
618 struct sockaddr_storage ss
;
619 int level
= (sa
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
620 int optname
= (sa
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
621 int portrange
= IP_PORTRANGE_LOW
;
623 error
= sock_setsockopt(nso
->nso_so
, level
, optname
, &portrange
, sizeof(portrange
));
624 if (!error
) { /* bind now to check for failure */
625 ss
.ss_len
= sa
->sa_len
;
626 ss
.ss_family
= sa
->sa_family
;
627 if (ss
.ss_family
== AF_INET
) {
628 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
629 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
630 } else if (ss
.ss_family
== AF_INET6
) {
631 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
632 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
637 error
= sock_bind(nso
->nso_so
, (struct sockaddr
*)&ss
);
643 NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
644 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nso
, naddr
, sotype
,
645 resvport
? "r" : "", port
, protocol
, vers
);
646 nfs_socket_destroy(nso
);
648 NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
649 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, naddr
,
650 sotype
, resvport
? "r" : "", port
, protocol
, vers
);
657 * Destroy an nfs_socket structure.
660 nfs_socket_destroy(struct nfs_socket
*nso
)
662 struct timespec ts
= { .tv_sec
= 4, .tv_nsec
= 0 };
664 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso
, nso
->nso_flags
, nso
->nso_error
);
665 lck_mtx_lock(&nso
->nso_lock
);
666 nso
->nso_flags
|= NSO_DISCONNECTING
;
667 if (nso
->nso_flags
& NSO_UPCALL
) { /* give upcall a chance to complete */
668 msleep(&nso
->nso_flags
, &nso
->nso_lock
, PZERO
- 1, "nfswaitupcall", &ts
);
670 lck_mtx_unlock(&nso
->nso_lock
);
671 sock_shutdown(nso
->nso_so
, SHUT_RDWR
);
672 sock_close(nso
->nso_so
);
673 if (nso
->nso_sotype
== SOCK_STREAM
) {
674 nfs_rpc_record_state_cleanup(&nso
->nso_rrs
);
676 lck_mtx_destroy(&nso
->nso_lock
, nfs_request_grp
);
677 if (nso
->nso_saddr
) {
678 FREE(nso
->nso_saddr
, M_SONAME
);
680 if (nso
->nso_saddr2
) {
681 FREE(nso
->nso_saddr2
, M_SONAME
);
683 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso
);
688 * Set common socket options on an nfs_socket.
691 nfs_socket_options(struct nfsmount
*nmp
, struct nfs_socket
*nso
)
694 * Set socket send/receive timeouts
695 * - Receive timeout shouldn't matter because most receives are performed
696 * in the socket upcall non-blocking.
697 * - Send timeout should allow us to react to a blocked socket.
698 * Soft mounts will want to abort sooner.
700 struct timeval timeo
;
701 int on
= 1, proto
, reserve
, error
;
704 timeo
.tv_sec
= (NMFLAG(nmp
, SOFT
) || nfs_can_squish(nmp
)) ? 5 : 60;
705 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
706 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
707 if (nso
->nso_sotype
== SOCK_STREAM
) {
708 /* Assume that SOCK_STREAM always requires a connection */
709 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_KEEPALIVE
, &on
, sizeof(on
));
710 /* set nodelay for TCP */
711 sock_gettype(nso
->nso_so
, NULL
, NULL
, &proto
);
712 if (proto
== IPPROTO_TCP
) {
713 sock_setsockopt(nso
->nso_so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
717 /* set socket buffer sizes for UDP/TCP */
718 reserve
= (nso
->nso_sotype
== SOCK_DGRAM
) ? NFS_UDPSOCKBUF
: MAX(nfs_tcp_sockbuf
, nmp
->nm_wsize
* 2);
720 error
= sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_SNDBUF
, &reserve
, sizeof(reserve
));
724 log(LOG_INFO
, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error
, reserve
);
727 reserve
= (nso
->nso_sotype
== SOCK_DGRAM
) ? NFS_UDPSOCKBUF
: MAX(nfs_tcp_sockbuf
, nmp
->nm_rsize
* 2);
728 error
= sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_RCVBUF
, &reserve
, sizeof(reserve
));
730 log(LOG_INFO
, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error
, reserve
);
733 /* set SO_NOADDRERR to detect network changes ASAP */
734 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
735 /* just playin' it safe with upcalls */
736 sock_setsockopt(nso
->nso_so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
737 /* socket should be interruptible if the mount is */
738 if (!NMFLAG(nmp
, INTR
)) {
739 sock_nointerrupt(nso
->nso_so
, 1);
744 * Release resources held in an nfs_socket_search.
747 nfs_socket_search_cleanup(struct nfs_socket_search
*nss
)
749 struct nfs_socket
*nso
, *nsonext
;
751 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
752 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
754 nfs_socket_destroy(nso
);
757 nfs_socket_destroy(nss
->nss_sock
);
758 nss
->nss_sock
= NULL
;
763 * Prefer returning certain errors over others.
764 * This function returns a ranking of the given error.
/*
 * NOTE(review): the body of this function (original lines 768-800) is not
 * visible in this extracted chunk — only the signature survived.  Restore
 * the error-ranking switch from the upstream file; do not guess the ranks.
 */
767 nfs_connect_error_class(int error
)
802 * Make sure a socket search returns the best error.
805 nfs_socket_search_update_error(struct nfs_socket_search
*nss
, int error
)
807 if (nfs_connect_error_class(error
) >= nfs_connect_error_class(nss
->nss_error
)) {
808 nss
->nss_error
= error
;
812 /* nfs_connect_search_new_socket:
813 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
816 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
817 * could not be used or if a socket timed out.
820 nfs_connect_search_new_socket(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
822 struct nfs_fs_location
*fsl
;
823 struct nfs_fs_server
*fss
;
824 struct sockaddr_storage ss
;
825 struct nfs_socket
*nso
;
830 NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
831 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
->nss_addrcnt
);
834 * while there are addresses and:
835 * we have no sockets or
836 * the last address failed and did not produce a socket (nss_last < 0) or
837 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
838 * then attempt to create a socket with the current address.
840 while (nss
->nss_addrcnt
> 0 && ((nss
->nss_last
< 0) || (nss
->nss_sockcnt
== 0) ||
841 ((nss
->nss_sockcnt
< 4) && (now
->tv_sec
>= (nss
->nss_last
+ 2))))) {
842 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
845 /* Can we convert the address to a sockaddr? */
846 fsl
= nmp
->nm_locations
.nl_locations
[nss
->nss_nextloc
.nli_loc
];
847 fss
= fsl
->nl_servers
[nss
->nss_nextloc
.nli_serv
];
848 addrstr
= fss
->ns_addresses
[nss
->nss_nextloc
.nli_addr
];
849 NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr
, nss
->nss_protocol
, nss
->nss_port
);
850 if (*addrstr
== '\0') {
852 * We have an unspecified local domain address. We use the program to translate to
853 * a well known local transport address. We only support PMAPROG and NFS for this.
855 if (nss
->nss_protocol
== PMAPPROG
) {
856 addrstr
= (nss
->nss_sotype
== SOCK_DGRAM
) ? RPCB_TICLTS_PATH
: RPCB_TICOTSORD_PATH
;
857 } else if (nss
->nss_protocol
== NFS_PROG
) {
858 addrstr
= nmp
->nm_nfs_localport
;
859 if (!addrstr
|| *addrstr
== '\0') {
860 addrstr
= (nss
->nss_sotype
== SOCK_DGRAM
) ? NFS_TICLTS_PATH
: NFS_TICOTSORD_PATH
;
863 NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss
->nss_protocol
, addrstr
);
865 if (!nfs_uaddr2sockaddr(addrstr
, (struct sockaddr
*)&ss
)) {
866 NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr
);
867 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
868 nss
->nss_addrcnt
-= 1;
872 /* Check that socket family is acceptable. */
873 if (nmp
->nm_sofamily
&& (ss
.ss_family
!= nmp
->nm_sofamily
)) {
874 NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss
.ss_family
, nmp
->nm_sofamily
);
875 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
876 nss
->nss_addrcnt
-= 1;
881 /* Create the socket. */
882 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nss
->nss_sotype
,
883 nss
->nss_port
, nss
->nss_protocol
, nss
->nss_version
,
884 ((nss
->nss_protocol
== NFS_PROG
) && NMFLAG(nmp
, RESVPORT
)), &nso
);
889 nso
->nso_location
= nss
->nss_nextloc
;
891 error
= sock_setupcall(nso
->nso_so
, nfs_connect_upcall
, nso
);
893 NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso
, error
);
894 lck_mtx_lock(&nso
->nso_lock
);
895 nso
->nso_error
= error
;
896 nso
->nso_flags
|= NSO_DEAD
;
897 lck_mtx_unlock(&nso
->nso_lock
);
900 TAILQ_INSERT_TAIL(&nss
->nss_socklist
, nso
, nso_link
);
902 nfs_location_next(&nmp
->nm_locations
, &nss
->nss_nextloc
);
903 nss
->nss_addrcnt
-= 1;
905 nss
->nss_last
= now
->tv_sec
;
908 if (nss
->nss_addrcnt
== 0 && nss
->nss_last
< 0) {
909 nss
->nss_last
= now
->tv_sec
;
916 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
917 * If successful set the socket options for the socket as require from the mount.
919 * Assumes: nso->nso_lock is held on entry and return.
922 nfs_connect_search_socket_connect(struct nfsmount
*nmp
, struct nfs_socket
*nso
, int verbose
)
926 if ((nso
->nso_sotype
!= SOCK_STREAM
) && NMFLAG(nmp
, NOCONNECT
)) {
927 /* no connection needed, just say it's already connected */
928 NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
929 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
930 nso
->nso_flags
|= NSO_CONNECTED
;
931 nfs_socket_options(nmp
, nso
);
932 return 1; /* Socket is connected and setup */
933 } else if (!(nso
->nso_flags
& NSO_CONNECTING
)) {
934 /* initiate the connection */
935 nso
->nso_flags
|= NSO_CONNECTING
;
936 lck_mtx_unlock(&nso
->nso_lock
);
937 NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
938 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
,
939 nso
->nso_saddr
->sa_family
== AF_LOCAL
? ((struct sockaddr_un
*)nso
->nso_saddr
)->sun_path
: "");
940 error
= sock_connect(nso
->nso_so
, nso
->nso_saddr
, MSG_DONTWAIT
);
942 NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
943 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
945 lck_mtx_lock(&nso
->nso_lock
);
946 if (error
&& (error
!= EINPROGRESS
)) {
947 nso
->nso_error
= error
;
948 nso
->nso_flags
|= NSO_DEAD
;
952 if (nso
->nso_flags
& NSO_CONNECTING
) {
953 /* check the connection */
954 if (sock_isconnected(nso
->nso_so
)) {
955 NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
956 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
957 nso
->nso_flags
&= ~NSO_CONNECTING
;
958 nso
->nso_flags
|= NSO_CONNECTED
;
959 nfs_socket_options(nmp
, nso
);
960 return 1; /* Socket is connected and setup */
962 int optlen
= sizeof(error
);
964 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &error
, &optlen
);
965 if (error
) { /* we got an error on the socket */
966 NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
967 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
969 printf("nfs connect socket error %d for %s\n",
970 error
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
972 nso
->nso_error
= error
;
973 nso
->nso_flags
|= NSO_DEAD
;
979 return 0; /* Waiting to be connected */
983 * nfs_connect_search_ping: Send a null proc on the nso socket.
986 nfs_connect_search_ping(struct nfsmount
*nmp
, struct nfs_socket
*nso
, struct timeval
*now
)
988 /* initiate a NULL RPC request */
989 uint64_t xid
= nso
->nso_pingxid
;
990 mbuf_t m
, mreq
= NULL
;
992 size_t reqlen
, sentlen
;
993 uint32_t vers
= nso
->nso_version
;
997 if (nso
->nso_protocol
== PMAPPROG
) {
998 vers
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
999 } else if (nso
->nso_protocol
== NFS_PROG
) {
1000 vers
= PVER2MAJOR(nmp
->nm_max_vers
);
1003 lck_mtx_unlock(&nso
->nso_lock
);
1004 NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso
, nso
->nso_sotype
, nso
->nso_protocol
, vers
);
1005 error
= nfsm_rpchead2(nmp
, nso
->nso_sotype
, nso
->nso_protocol
, vers
, 0, RPCAUTH_SYS
,
1006 vfs_context_ucred(vfs_context_kernel()), NULL
, NULL
, &xid
, &mreq
);
1007 lck_mtx_lock(&nso
->nso_lock
);
1009 nso
->nso_flags
|= NSO_PINGING
;
1010 nso
->nso_pingxid
= R_XID32(xid
);
1011 nso
->nso_reqtimestamp
= now
->tv_sec
;
1012 bzero(&msg
, sizeof(msg
));
1013 if ((nso
->nso_sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
)) {
1014 msg
.msg_name
= nso
->nso_saddr
;
1015 msg
.msg_namelen
= nso
->nso_saddr
->sa_len
;
1017 for (reqlen
= 0, m
= mreq
; m
; m
= mbuf_next(m
)) {
1018 reqlen
+= mbuf_len(m
);
1020 lck_mtx_unlock(&nso
->nso_lock
);
1021 NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq
);
1022 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreq
, 0, &sentlen
);
1023 NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
1024 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1025 lck_mtx_lock(&nso
->nso_lock
);
1026 if (!error
&& (sentlen
!= reqlen
)) {
1031 nso
->nso_error
= error
;
1032 nso
->nso_flags
|= NSO_DEAD
;
1040 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
1041 * Set the nfs socket protocol and version if needed.
1044 nfs_connect_search_socket_found(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct nfs_socket
*nso
)
1046 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1047 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1048 if (!nso
->nso_version
) {
1049 /* If the version isn't set, the default must have worked. */
1050 if (nso
->nso_protocol
== PMAPPROG
) {
1051 nso
->nso_version
= (nso
->nso_saddr
->sa_family
== AF_INET
) ? PMAPVERS
: RPCBVERS4
;
1053 if (nso
->nso_protocol
== NFS_PROG
) {
1054 nso
->nso_version
= PVER2MAJOR(nmp
->nm_max_vers
);
1057 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
1059 nss
->nss_sock
= nso
;
1063 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
1064 * the list. Dead socket are then destroyed.
1067 nfs_connect_search_socket_reap(struct nfsmount
*nmp __unused
, struct nfs_socket_search
*nss
, struct timeval
*now
)
1069 struct nfs_socket
*nso
, *nsonext
;
1071 TAILQ_FOREACH_SAFE(nso
, &nss
->nss_socklist
, nso_link
, nsonext
) {
1072 lck_mtx_lock(&nso
->nso_lock
);
1073 if (now
->tv_sec
>= (nso
->nso_timestamp
+ nss
->nss_timeo
)) {
1075 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
1076 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1077 nso
->nso_error
= ETIMEDOUT
;
1078 nso
->nso_flags
|= NSO_DEAD
;
1080 if (!(nso
->nso_flags
& NSO_DEAD
)) {
1081 lck_mtx_unlock(&nso
->nso_lock
);
1084 lck_mtx_unlock(&nso
->nso_lock
);
1085 NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
1086 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, nso
->nso_error
, nso
->nso_flags
);
1087 nfs_socket_search_update_error(nss
, nso
->nso_error
);
1088 TAILQ_REMOVE(&nss
->nss_socklist
, nso
, nso_link
);
1090 nfs_socket_destroy(nso
);
1091 /* If there are more sockets to try, force the starting of another socket */
1092 if (nss
->nss_addrcnt
> 0) {
1099 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1102 nfs_connect_search_check(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
, struct timeval
*now
)
1106 /* log a warning if connect is taking a while */
1107 if (((now
->tv_sec
- nss
->nss_timestamp
) >= 8) && ((nss
->nss_flags
& (NSS_VERBOSE
| NSS_WARNED
)) == NSS_VERBOSE
)) {
1108 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1109 nss
->nss_flags
|= NSS_WARNED
;
1111 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
1114 if ((error
= nfs_sigintr(nmp
, NULL
, current_thread(), 0))) {
1118 /* If we were succesfull at sending a ping, wait up to a second for a reply */
1119 if (nss
->nss_last
>= 0) {
1120 tsleep(nss
, PSOCK
, "nfs_connect_search_wait", hz
);
1128 * Continue the socket search until we have something to report.
1131 nfs_connect_search_loop(struct nfsmount
*nmp
, struct nfs_socket_search
*nss
)
1133 struct nfs_socket
*nso
;
1136 int verbose
= (nss
->nss_flags
& NSS_VERBOSE
);
1140 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, now
.tv_sec
);
1142 /* add a new socket to the socket list if needed and available */
1143 error
= nfs_connect_search_new_socket(nmp
, nss
, &now
);
1145 NFS_SOCK_DBG("nfs connect returned %d\n", error
);
1149 /* check each active socket on the list and try to push it along */
1150 TAILQ_FOREACH(nso
, &nss
->nss_socklist
, nso_link
) {
1151 lck_mtx_lock(&nso
->nso_lock
);
1153 /* If not connected connect it */
1154 if (!(nso
->nso_flags
& NSO_CONNECTED
)) {
1155 if (!nfs_connect_search_socket_connect(nmp
, nso
, verbose
)) {
1156 lck_mtx_unlock(&nso
->nso_lock
);
1161 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
1162 if (!(nso
->nso_flags
& (NSO_PINGING
| NSO_VERIFIED
)) ||
1163 ((nso
->nso_sotype
== SOCK_DGRAM
) && (now
.tv_sec
>= nso
->nso_reqtimestamp
+ 2))) {
1164 if (!nfs_connect_search_ping(nmp
, nso
, &now
)) {
1165 lck_mtx_unlock(&nso
->nso_lock
);
1170 /* Has the socket been verified by the up call routine? */
1171 if (nso
->nso_flags
& NSO_VERIFIED
) {
1172 /* WOOHOO!! This socket looks good! */
1173 nfs_connect_search_socket_found(nmp
, nss
, nso
);
1174 lck_mtx_unlock(&nso
->nso_lock
);
1177 lck_mtx_unlock(&nso
->nso_lock
);
1180 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1181 nfs_connect_search_socket_reap(nmp
, nss
, &now
);
1184 * Keep looping if we haven't found a socket yet and we have more
1185 * sockets to (continue to) try.
1188 if (!nss
->nss_sock
&& (!TAILQ_EMPTY(&nss
->nss_socklist
) || nss
->nss_addrcnt
)) {
1189 error
= nfs_connect_search_check(nmp
, nss
, &now
);
1195 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1200 * Initialize a new NFS connection.
1202 * Search for a location to connect a socket to and initialize the connection.
1204 * An NFS mount may have multiple locations/servers/addresses available.
1205 * We attempt to connect to each one asynchronously and will start
1206 * several sockets in parallel if other locations are slow to answer.
1207 * We'll use the first NFS socket we can successfully set up.
1209 * The search may involve contacting the portmapper service first.
1211 * A mount's initial connection may require negotiating some parameters such
1212 * as socket type and NFS version.
1216 nfs_connect(struct nfsmount
*nmp
, int verbose
, int timeo
)
1218 struct nfs_socket_search nss
;
1219 struct nfs_socket
*nso
, *nsonfs
;
1220 struct sockaddr_storage ss
;
1221 struct sockaddr
*saddr
, *oldsaddr
;
1226 struct timeval start
;
1227 int error
, savederror
, nfsvers
;
1229 uint8_t sotype
= nmp
->nm_sotype
? nmp
->nm_sotype
: SOCK_STREAM
;
1230 fhandle_t
*fh
= NULL
;
1235 /* paranoia... check that we have at least one address in the locations */
1237 for (loc
= 0; loc
< nmp
->nm_locations
.nl_numlocs
; loc
++) {
1238 for (serv
= 0; serv
< nmp
->nm_locations
.nl_locations
[loc
]->nl_servcount
; serv
++) {
1239 addrtotal
+= nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
;
1240 if (nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_addrcount
== 0) {
1241 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1242 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1243 nmp
->nm_locations
.nl_locations
[loc
]->nl_servers
[serv
]->ns_name
);
1248 if (addrtotal
== 0) {
1249 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1250 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1253 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1254 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, addrtotal
);
1257 lck_mtx_lock(&nmp
->nm_lock
);
1258 nmp
->nm_sockflags
|= NMSOCK_CONNECTING
;
1260 lck_mtx_unlock(&nmp
->nm_lock
);
1261 microuptime(&start
);
1262 savederror
= error
= 0;
1265 /* initialize socket search state */
1266 bzero(&nss
, sizeof(nss
));
1267 nss
.nss_addrcnt
= addrtotal
;
1268 nss
.nss_error
= savederror
;
1269 TAILQ_INIT(&nss
.nss_socklist
);
1270 nss
.nss_sotype
= sotype
;
1271 nss
.nss_startloc
= nmp
->nm_locations
.nl_current
;
1272 nss
.nss_timestamp
= start
.tv_sec
;
1273 nss
.nss_timeo
= timeo
;
1275 nss
.nss_flags
|= NSS_VERBOSE
;
1278 /* First time connecting, we may need to negotiate some things */
1279 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1280 NFS_SOCK_DBG("so_family = %d\n", nmp
->nm_sofamily
);
1281 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp
->nm_nfsport
, nmp
->nm_nfs_localport
? nmp
->nm_nfs_localport
: "");
1282 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp
->nm_mountport
, nmp
->nm_mount_localport
? nmp
->nm_mount_localport
: "");
1283 if (!nmp
->nm_vers
) {
1284 /* No NFS version specified... */
1285 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1287 if (PVER2MAJOR(nmp
->nm_max_vers
) >= NFS_VER4
&& tryv4
) {
1288 nss
.nss_port
= NFS_PORT
;
1289 nss
.nss_protocol
= NFS_PROG
;
1290 nss
.nss_version
= 4;
1291 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1294 /* ...connect to portmapper first if we (may) need any ports. */
1295 nss
.nss_port
= PMAPPORT
;
1296 nss
.nss_protocol
= PMAPPROG
;
1297 nss
.nss_version
= 0;
1302 /* ...connect to NFS port first. */
1303 nss
.nss_port
= nmp
->nm_nfsport
;
1304 nss
.nss_protocol
= NFS_PROG
;
1305 nss
.nss_version
= 0;
1308 } else if (nmp
->nm_vers
>= NFS_VER4
) {
1310 /* For NFSv4, we use the given (or default) port. */
1311 nss
.nss_port
= nmp
->nm_nfsport
? nmp
->nm_nfsport
: NFS_PORT
;
1312 nss
.nss_protocol
= NFS_PROG
;
1313 nss
.nss_version
= 4;
1315 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1316 * if no port is specified on the mount;
1317 * Note nm_vers is set so we will only try NFS_VER4.
1319 if (!nmp
->nm_nfsport
) {
1320 nss
.nss_flags
|= NSS_FALLBACK2PMAP
;
1323 nss
.nss_port
= PMAPPORT
;
1324 nss
.nss_protocol
= PMAPPROG
;
1325 nss
.nss_version
= 0;
1329 /* For NFSv3/v2... */
1330 if (!nmp
->nm_nfsport
|| (!NM_OMATTR_GIVEN(nmp
, FH
) && !nmp
->nm_mountport
)) {
1331 /* ...connect to portmapper first if we need any ports. */
1332 nss
.nss_port
= PMAPPORT
;
1333 nss
.nss_protocol
= PMAPPROG
;
1334 nss
.nss_version
= 0;
1336 /* ...connect to NFS port first. */
1337 nss
.nss_port
= nmp
->nm_nfsport
;
1338 nss
.nss_protocol
= NFS_PROG
;
1339 nss
.nss_version
= nmp
->nm_vers
;
1342 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1343 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1344 nss
.nss_protocol
, nss
.nss_version
);
1346 /* we've connected before, just connect to NFS port */
1347 if (!nmp
->nm_nfsport
) {
1348 /* need to ask portmapper which port that would be */
1349 nss
.nss_port
= PMAPPORT
;
1350 nss
.nss_protocol
= PMAPPROG
;
1351 nss
.nss_version
= 0;
1353 nss
.nss_port
= nmp
->nm_nfsport
;
1354 nss
.nss_protocol
= NFS_PROG
;
1355 nss
.nss_version
= nmp
->nm_vers
;
1357 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1358 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nss
.nss_sotype
, nss
.nss_port
,
1359 nss
.nss_protocol
, nss
.nss_version
);
1362 /* Set next location to first valid location. */
1363 /* If start location is invalid, find next location. */
1364 nss
.nss_nextloc
= nss
.nss_startloc
;
1365 if ((nss
.nss_nextloc
.nli_serv
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servcount
) ||
1366 (nss
.nss_nextloc
.nli_addr
>= nmp
->nm_locations
.nl_locations
[nss
.nss_nextloc
.nli_loc
]->nl_servers
[nss
.nss_nextloc
.nli_serv
]->ns_addrcount
)) {
1367 nfs_location_next(&nmp
->nm_locations
, &nss
.nss_nextloc
);
1368 if (!nfs_location_index_cmp(&nss
.nss_nextloc
, &nss
.nss_startloc
)) {
1369 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1370 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1378 error
= nfs_connect_search_loop(nmp
, &nss
);
1379 if (error
|| !nss
.nss_sock
) {
1381 nfs_socket_search_cleanup(&nss
);
1382 if (nss
.nss_flags
& NSS_FALLBACK2PMAP
) {
1384 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1385 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1389 if (!error
&& (nss
.nss_sotype
== SOCK_STREAM
) && !nmp
->nm_sotype
&& (nmp
->nm_vers
< NFS_VER4
)) {
1391 sotype
= SOCK_DGRAM
;
1392 savederror
= nss
.nss_error
;
1393 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1394 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
, nss
.nss_error
);
1398 error
= nss
.nss_error
? nss
.nss_error
: ETIMEDOUT
;
1400 lck_mtx_lock(&nmp
->nm_lock
);
1401 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1403 lck_mtx_unlock(&nmp
->nm_lock
);
1404 if (nss
.nss_flags
& NSS_WARNED
) {
1405 log(LOG_INFO
, "nfs_connect: socket connect aborted for %s\n",
1406 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1409 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1412 NFS_ZFREE(ZV_NAMEI
, path
);
1414 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1415 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
1419 /* try to use nss_sock */
1421 nss
.nss_sock
= NULL
;
1423 /* We may be speaking to portmap first... to determine port(s). */
1424 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1425 port
= ntohs(((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
);
1426 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
1427 port
= ntohs(((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
);
1428 } else if (nso
->nso_saddr
->sa_family
== AF_LOCAL
) {
1429 if (nso
->nso_protocol
== PMAPPROG
) {
1434 if (port
== PMAPPORT
) {
1435 /* Use this portmapper port to get the port #s we need. */
1436 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1437 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1439 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1440 sock_setupcall(nso
->nso_so
, NULL
, NULL
);
1442 /* Set up socket address and port for NFS socket. */
1443 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1445 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1446 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: PVER2MAJOR(nmp
->nm_max_vers
);
1447 if (!(port
= nmp
->nm_nfsport
)) {
1448 if (ss
.ss_family
== AF_INET
) {
1449 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
1450 } else if (ss
.ss_family
== AF_INET6
) {
1451 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
1452 } else if (ss
.ss_family
== AF_LOCAL
) {
1453 if (((struct sockaddr_un
*)&ss
)->sun_path
[0] == '/') {
1454 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un
*)&ss
)->sun_path
);
1457 for (; nfsvers
>= (int)PVER2MAJOR(nmp
->nm_min_vers
); nfsvers
--) {
1458 if (nmp
->nm_vers
&& nmp
->nm_vers
!= nfsvers
) {
1459 continue; /* Wrong version */
1462 if (nfsvers
== NFS_VER4
&& nso
->nso_sotype
== SOCK_DGRAM
) {
1463 continue; /* NFSv4 does not do UDP */
1466 if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_nfs_localport
) {
1467 struct sockaddr_un
*sun
= (struct sockaddr_un
*)&ss
;
1468 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp
->nm_nfs_localport
);
1469 strlcpy(sun
->sun_path
, nmp
->nm_nfs_localport
, sizeof(sun
->sun_path
));
1472 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1473 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1474 nso
->nso_so
, NFS_PROG
, nfsvers
, nso
->nso_sotype
, timeo
);
1477 if (ss
.ss_family
== AF_INET
) {
1478 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1479 } else if (ss
.ss_family
== AF_INET6
) {
1480 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1481 } else if (ss
.ss_family
== AF_LOCAL
) {
1482 port
= ((struct sockaddr_un
*)&ss
)->sun_path
[0] ? NFS_PORT
: 0;
1485 error
= EPROGUNAVAIL
;
1488 if (port
== NFS_PORT
&& nfsvers
== NFS_VER4
&& tryv4
== 0) {
1489 continue; /* We already tried this */
1497 if (nfsvers
< (int)PVER2MAJOR(nmp
->nm_min_vers
) && error
== 0) {
1498 error
= EPROGUNAVAIL
;
1501 nfs_socket_search_update_error(&nss
, error
);
1502 nfs_socket_destroy(nso
);
1503 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers
, error
);
1506 } else if (nmp
->nm_nfs_localport
) {
1507 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_nfs_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1508 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp
->nm_nfs_localport
);
1511 /* Create NFS protocol socket and add it to the list of sockets. */
1512 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1513 if (ss
.ss_family
== AF_LOCAL
) {
1514 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un
*)&ss
)->sun_path
, port
);
1516 error
= nfs_socket_create(nmp
, (struct sockaddr
*)&ss
, nso
->nso_sotype
, port
,
1517 NFS_PROG
, nfsvers
, NMFLAG(nmp
, RESVPORT
), &nsonfs
);
1519 nfs_socket_search_update_error(&nss
, error
);
1520 nfs_socket_destroy(nso
);
1521 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error
);
1524 nsonfs
->nso_location
= nso
->nso_location
;
1525 nsonfs
->nso_wake
= &nss
;
1526 error
= sock_setupcall(nsonfs
->nso_so
, nfs_connect_upcall
, nsonfs
);
1528 nfs_socket_search_update_error(&nss
, error
);
1529 nfs_socket_destroy(nsonfs
);
1530 nfs_socket_destroy(nso
);
1531 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error
);
1534 TAILQ_INSERT_TAIL(&nss
.nss_socklist
, nsonfs
, nso_link
);
1536 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1537 /* Set up socket address and port for MOUNT socket. */
1539 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1540 port
= nmp
->nm_mountport
;
1541 NFS_SOCK_DBG("mount port = %d\n", port
);
1542 if (ss
.ss_family
== AF_INET
) {
1543 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1544 } else if (ss
.ss_family
== AF_INET6
) {
1545 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1546 } else if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_mount_localport
) {
1547 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp
->nm_mount_localport
, nmp
->nm_mountport
);
1548 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_mount_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1551 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1552 /* If NFS version is unknown, optimistically choose for NFSv3. */
1553 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1554 int mntproto
= (NM_OMFLAG(nmp
, MNTUDP
) || (nso
->nso_sotype
== SOCK_DGRAM
)) ? IPPROTO_UDP
: IPPROTO_TCP
;
1555 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso
->nso_so
);
1556 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1557 nso
->nso_so
, RPCPROG_MNT
, mntvers
, mntproto
== IPPROTO_UDP
? SOCK_DGRAM
: SOCK_STREAM
, timeo
);
1560 if (ss
.ss_family
== AF_INET
) {
1561 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1562 } else if (ss
.ss_family
== AF_INET6
) {
1563 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1564 } else if (ss
.ss_family
== AF_LOCAL
) {
1565 port
= (((struct sockaddr_un
*)&ss
)->sun_path
[0] != '\0');
1568 error
= EPROGUNAVAIL
;
1571 /* create sockaddr for MOUNT */
1573 MALLOC(nsonfs
->nso_saddr2
, struct sockaddr
*, ss
.ss_len
, M_SONAME
, M_WAITOK
| M_ZERO
);
1575 if (!error
&& !nsonfs
->nso_saddr2
) {
1579 bcopy(&ss
, nsonfs
->nso_saddr2
, ss
.ss_len
);
1582 NFS_SOCK_DBG("Could not create mount sockaet address %d", error
);
1583 lck_mtx_lock(&nsonfs
->nso_lock
);
1584 nsonfs
->nso_error
= error
;
1585 nsonfs
->nso_flags
|= NSO_DEAD
;
1586 lck_mtx_unlock(&nsonfs
->nso_lock
);
1589 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso
, nso
->nso_so
);
1590 nfs_socket_destroy(nso
);
1594 /* nso is an NFS socket */
1595 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
);
1597 /* If NFS version wasn't specified, it was determined during the connect. */
1598 nfsvers
= nmp
->nm_vers
? nmp
->nm_vers
: (int)nso
->nso_version
;
1600 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1601 if ((nfsvers
< NFS_VER4
) && !(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
) && !NM_OMATTR_GIVEN(nmp
, FH
)) {
1603 saddr
= nso
->nso_saddr2
;
1605 /* Need sockaddr for MOUNT port */
1606 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp
->nm_mountport
, nmp
->nm_mount_localport
);
1607 bcopy(nso
->nso_saddr
, &ss
, nso
->nso_saddr
->sa_len
);
1608 port
= nmp
->nm_mountport
;
1609 if (ss
.ss_family
== AF_INET
) {
1610 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(port
);
1611 } else if (ss
.ss_family
== AF_INET6
) {
1612 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(port
);
1613 } else if (ss
.ss_family
== AF_LOCAL
&& nmp
->nm_mount_localport
) {
1614 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp
->nm_mount_localport
, nmp
->nm_mountport
);
1615 strlcpy(((struct sockaddr_un
*)&ss
)->sun_path
, nmp
->nm_mount_localport
, sizeof(((struct sockaddr_un
*)&ss
)->sun_path
));
1618 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1619 int mntvers
= (nfsvers
== NFS_VER2
) ? RPCMNT_VER1
: RPCMNT_VER3
;
1620 int so_type
= NM_OMFLAG(nmp
, MNTUDP
) ? SOCK_DGRAM
: nso
->nso_sotype
;
1621 error
= nfs_portmap_lookup(nmp
, vfs_context_current(), (struct sockaddr
*)&ss
,
1622 NULL
, RPCPROG_MNT
, mntvers
, so_type
, timeo
);
1623 if (ss
.ss_family
== AF_INET
) {
1624 port
= ntohs(((struct sockaddr_in
*)&ss
)->sin_port
);
1625 } else if (ss
.ss_family
== AF_INET6
) {
1626 port
= ntohs(((struct sockaddr_in6
*)&ss
)->sin6_port
);
1631 saddr
= (struct sockaddr
*)&ss
;
1633 error
= EPROGUNAVAIL
;
1638 error
= nfs3_check_lockmode(nmp
, saddr
, nso
->nso_sotype
, timeo
);
1640 nfs_socket_search_update_error(&nss
, error
);
1641 nfs_socket_destroy(nso
);
1646 fh
= zalloc(nfs_fhandle_zone
);
1649 path
= zalloc(ZV_NAMEI
);
1651 if (!saddr
|| !fh
|| !path
) {
1656 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1659 NFS_ZFREE(ZV_NAMEI
, path
);
1661 nfs_socket_search_update_error(&nss
, error
);
1662 nfs_socket_destroy(nso
);
1665 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
, path
, MAXPATHLEN
, 1);
1666 error
= nfs3_mount_rpc(nmp
, saddr
, nso
->nso_sotype
, nfsvers
,
1667 path
, vfs_context_current(), timeo
, fh
, &nmp
->nm_servsec
);
1668 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1669 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1671 /* Make sure we can agree on a security flavor. */
1672 int o
, s
; /* indices into mount option and server security flavor lists */
1675 if ((nfsvers
== NFS_VER3
) && !nmp
->nm_servsec
.count
) {
1676 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1677 nmp
->nm_servsec
.count
= 1;
1678 nmp
->nm_servsec
.flavors
[0] = RPCAUTH_SYS
;
1680 if (nmp
->nm_sec
.count
) {
1681 /* Choose the first flavor in our list that the server supports. */
1682 if (!nmp
->nm_servsec
.count
) {
1683 /* we don't know what the server supports, just use our first choice */
1684 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[0];
1687 for (o
= 0; !found
&& (o
< nmp
->nm_sec
.count
); o
++) {
1688 for (s
= 0; !found
&& (s
< nmp
->nm_servsec
.count
); s
++) {
1689 if (nmp
->nm_sec
.flavors
[o
] == nmp
->nm_servsec
.flavors
[s
]) {
1690 nmp
->nm_auth
= nmp
->nm_sec
.flavors
[o
];
1696 /* Choose the first one we support from the server's list. */
1697 if (!nmp
->nm_servsec
.count
) {
1698 nmp
->nm_auth
= RPCAUTH_SYS
;
1701 for (s
= 0; s
< nmp
->nm_servsec
.count
; s
++) {
1702 switch (nmp
->nm_servsec
.flavors
[s
]) {
1704 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1705 if (found
&& (nmp
->nm_auth
== RPCAUTH_NONE
)) {
1714 nmp
->nm_auth
= nmp
->nm_servsec
.flavors
[s
];
1721 error
= !found
? EAUTH
: 0;
1723 NFS_ZFREE(ZV_NAMEI
, path
);
1725 nfs_socket_search_update_error(&nss
, error
);
1726 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1727 nfs_socket_destroy(nso
);
1731 NFS_ZFREE(nfs_fhandle_zone
, nmp
->nm_fh
);
1735 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_CALLUMNT
);
1738 /* put the real upcall in place */
1739 upcall
= (nso
->nso_sotype
== SOCK_STREAM
) ? nfs_tcp_rcv
: nfs_udp_rcv
;
1740 error
= sock_setupcall(nso
->nso_so
, upcall
, nmp
);
1742 nfs_socket_search_update_error(&nss
, error
);
1743 nfs_socket_destroy(nso
);
1747 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1748 /* set mntfromname to this location */
1749 if (!NM_OMATTR_GIVEN(nmp
, MNTFROM
)) {
1750 nfs_location_mntfromname(&nmp
->nm_locations
, nso
->nso_location
,
1751 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
1752 sizeof(vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
), 0);
1754 /* some negotiated values need to remain unchanged for the life of the mount */
1755 if (!nmp
->nm_sotype
) {
1756 nmp
->nm_sotype
= nso
->nso_sotype
;
1758 if (!nmp
->nm_vers
) {
1759 nmp
->nm_vers
= nfsvers
;
1761 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1762 if ((nfsvers
>= NFS_VER4
) && !NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1763 if (nso
->nso_saddr
->sa_family
== AF_INET
) {
1764 port
= ((struct sockaddr_in
*)nso
->nso_saddr
)->sin_port
= htons(port
);
1765 } else if (nso
->nso_saddr
->sa_family
== AF_INET6
) {
1766 port
= ((struct sockaddr_in6
*)nso
->nso_saddr
)->sin6_port
= htons(port
);
1770 if (port
== NFS_PORT
) {
1771 nmp
->nm_nfsport
= NFS_PORT
;
1777 /* do some version-specific pre-mount set up */
1778 if (nmp
->nm_vers
>= NFS_VER4
) {
1780 nmp
->nm_mounttime
= ((uint64_t)now
.tv_sec
<< 32) | now
.tv_usec
;
1781 if (!NMFLAG(nmp
, NOCALLBACK
)) {
1782 nfs4_mount_callback_setup(nmp
);
1788 /* Initialize NFS socket state variables */
1789 lck_mtx_lock(&nmp
->nm_lock
);
1790 nmp
->nm_srtt
[0] = nmp
->nm_srtt
[1] = nmp
->nm_srtt
[2] =
1791 nmp
->nm_srtt
[3] = (NFS_TIMEO
<< 3);
1792 nmp
->nm_sdrtt
[0] = nmp
->nm_sdrtt
[1] = nmp
->nm_sdrtt
[2] =
1793 nmp
->nm_sdrtt
[3] = 0;
1794 if (nso
->nso_sotype
== SOCK_DGRAM
) {
1795 nmp
->nm_cwnd
= NFS_MAXCWND
/ 2; /* Initial send window */
1797 } else if (nso
->nso_sotype
== SOCK_STREAM
) {
1798 nmp
->nm_timeouts
= 0;
1800 nmp
->nm_sockflags
&= ~NMSOCK_CONNECTING
;
1801 nmp
->nm_sockflags
|= NMSOCK_SETUP
;
1802 /* move the socket to the mount structure */
1804 oldsaddr
= nmp
->nm_saddr
;
1805 nmp
->nm_saddr
= nso
->nso_saddr
;
1806 lck_mtx_unlock(&nmp
->nm_lock
);
1807 error
= nfs_connect_setup(nmp
);
1808 lck_mtx_lock(&nmp
->nm_lock
);
1809 nmp
->nm_sockflags
&= ~NMSOCK_SETUP
;
1811 nmp
->nm_sockflags
|= NMSOCK_READY
;
1812 wakeup(&nmp
->nm_sockflags
);
1815 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1816 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, nso
, error
);
1817 nfs_socket_search_update_error(&nss
, error
);
1818 nmp
->nm_saddr
= oldsaddr
;
1819 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1820 /* undo settings made prior to setup */
1821 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_SOCKET_TYPE
)) {
1825 if (nmp
->nm_vers
>= NFS_VER4
) {
1826 if (!NFS_BITMAP_ISSET(nmp
->nm_mattrs
, NFS_MATTR_NFS_PORT
)) {
1827 nmp
->nm_nfsport
= 0;
1830 nfs4_mount_callback_shutdown(nmp
);
1832 if (IS_VALID_CRED(nmp
->nm_mcred
)) {
1833 kauth_cred_unref(&nmp
->nm_mcred
);
1835 bzero(&nmp
->nm_un
, sizeof(nmp
->nm_un
));
1840 lck_mtx_unlock(&nmp
->nm_lock
);
1842 nfs_socket_destroy(nso
);
1846 /* update current location */
1847 if ((nmp
->nm_locations
.nl_current
.nli_flags
& NLI_VALID
) &&
1848 (nmp
->nm_locations
.nl_current
.nli_serv
!= nso
->nso_location
.nli_serv
)) {
1849 /* server has changed, we should initiate failover/recovery */
1852 nmp
->nm_locations
.nl_current
= nso
->nso_location
;
1853 nmp
->nm_locations
.nl_current
.nli_flags
|= NLI_VALID
;
1855 if (!(nmp
->nm_sockflags
& NMSOCK_HASCONNECTED
)) {
1856 /* We have now successfully connected... make a note of it. */
1857 nmp
->nm_sockflags
|= NMSOCK_HASCONNECTED
;
1860 lck_mtx_unlock(&nmp
->nm_lock
);
1862 FREE(oldsaddr
, M_SONAME
);
1865 if (nss
.nss_flags
& NSS_WARNED
) {
1866 log(LOG_INFO
, "nfs_connect: socket connect completed for %s\n",
1867 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1871 nfs_socket_search_cleanup(&nss
);
1873 NFS_ZFREE(nfs_fhandle_zone
, fh
);
1876 NFS_ZFREE(ZV_NAMEI
, path
);
1878 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
1883 /* setup & confirm socket connection is functional */
1889 struct nfsmount
*nmp
)
1893 if (nmp
->nm_vers
>= NFS_VER4
) {
1894 if (nmp
->nm_state
& NFSSTA_CLIENTID
) {
1895 /* first, try to renew our current state */
1896 error
= nfs4_renew(nmp
, R_SETUP
);
1897 if ((error
== NFSERR_ADMIN_REVOKED
) ||
1898 (error
== NFSERR_CB_PATH_DOWN
) ||
1899 (error
== NFSERR_EXPIRED
) ||
1900 (error
== NFSERR_LEASE_MOVED
) ||
1901 (error
== NFSERR_STALE_CLIENTID
)) {
1902 lck_mtx_lock(&nmp
->nm_lock
);
1903 nfs_need_recover(nmp
, error
);
1904 lck_mtx_unlock(&nmp
->nm_lock
);
1907 error
= nfs4_setclientid(nmp
);
1914 * NFS socket reconnect routine:
1915 * Called when a connection is broken.
1916 * - disconnect the old socket
1917 * - nfs_connect() again
1918 * - set R_MUSTRESEND for all outstanding requests on mount point
1919 * If this fails the mount point is DEAD!
1922 nfs_reconnect(struct nfsmount
*nmp
)
1926 thread_t thd
= current_thread();
1927 int error
, wentdown
= 0, verbose
= 1;
1932 lastmsg
= now
.tv_sec
- (nmp
->nm_tprintf_delay
- nmp
->nm_tprintf_initial_delay
);
1934 nfs_disconnect(nmp
);
1937 lck_mtx_lock(&nmp
->nm_lock
);
1938 timeo
= nfs_is_squishy(nmp
) ? 8 : 30;
1939 lck_mtx_unlock(&nmp
->nm_lock
);
1941 while ((error
= nfs_connect(nmp
, verbose
, timeo
))) {
1943 nfs_disconnect(nmp
);
1944 if ((error
== EINTR
) || (error
== ERESTART
)) {
1951 if ((lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
) {
1952 lastmsg
= now
.tv_sec
;
1953 nfs_down(nmp
, thd
, error
, NFSSTA_TIMEO
, "can not connect", 0);
1956 lck_mtx_lock(&nmp
->nm_lock
);
1957 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
1958 /* we're not yet completely mounted and */
1959 /* we can't reconnect, so we fail */
1960 lck_mtx_unlock(&nmp
->nm_lock
);
1961 NFS_SOCK_DBG("Not mounted returning %d\n", error
);
1965 if (nfs_mount_check_dead_timeout(nmp
)) {
1966 nfs_mount_make_zombie(nmp
);
1967 lck_mtx_unlock(&nmp
->nm_lock
);
1971 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 1))) {
1972 lck_mtx_unlock(&nmp
->nm_lock
);
1975 lck_mtx_unlock(&nmp
->nm_lock
);
1976 tsleep(nfs_reconnect
, PSOCK
, "nfs_reconnect_delay", 2 * hz
);
1977 if ((error
= nfs_sigintr(nmp
, NULL
, thd
, 0))) {
1983 nfs_up(nmp
, thd
, NFSSTA_TIMEO
, "connected");
1987 * Loop through outstanding request list and mark all requests
1988 * as needing a resend. (Though nfs_need_reconnect() probably
1989 * marked them all already.)
1991 lck_mtx_lock(nfs_request_mutex
);
1992 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
1993 if (rq
->r_nmp
== nmp
) {
1994 lck_mtx_lock(&rq
->r_mtx
);
1995 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
1996 rq
->r_flags
|= R_MUSTRESEND
;
1999 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
2000 nfs_asyncio_resend(rq
);
2003 lck_mtx_unlock(&rq
->r_mtx
);
2006 lck_mtx_unlock(nfs_request_mutex
);
2011 * NFS disconnect. Clean up and unlink.
2014 nfs_disconnect(struct nfsmount
*nmp
)
2016 struct nfs_socket
*nso
;
2018 lck_mtx_lock(&nmp
->nm_lock
);
2021 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
2022 if (nmp
->nm_state
& NFSSTA_SENDING
) { /* wait for sending to complete */
2023 nmp
->nm_state
|= NFSSTA_WANTSND
;
2024 msleep(&nmp
->nm_state
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitsending", &ts
);
2027 if (nmp
->nm_sockflags
& NMSOCK_POKE
) { /* wait for poking to complete */
2028 msleep(&nmp
->nm_sockflags
, &nmp
->nm_lock
, PZERO
- 1, "nfswaitpoke", &ts
);
2031 nmp
->nm_sockflags
|= NMSOCK_DISCONNECTING
;
2032 nmp
->nm_sockflags
&= ~NMSOCK_READY
;
2035 if (nso
->nso_saddr
== nmp
->nm_saddr
) {
2036 nso
->nso_saddr
= NULL
;
2038 lck_mtx_unlock(&nmp
->nm_lock
);
2039 nfs_socket_destroy(nso
);
2040 lck_mtx_lock(&nmp
->nm_lock
);
2041 nmp
->nm_sockflags
&= ~NMSOCK_DISCONNECTING
;
2042 lck_mtx_unlock(&nmp
->nm_lock
);
2044 lck_mtx_unlock(&nmp
->nm_lock
);
2049 * mark an NFS mount as needing a reconnect/resends.
2052 nfs_need_reconnect(struct nfsmount
*nmp
)
2056 lck_mtx_lock(&nmp
->nm_lock
);
2057 nmp
->nm_sockflags
&= ~(NMSOCK_READY
| NMSOCK_SETUP
);
2058 lck_mtx_unlock(&nmp
->nm_lock
);
2061 * Loop through outstanding request list and
2062 * mark all requests as needing a resend.
2064 lck_mtx_lock(nfs_request_mutex
);
2065 TAILQ_FOREACH(rq
, &nfs_reqq
, r_chain
) {
2066 if (rq
->r_nmp
== nmp
) {
2067 lck_mtx_lock(&rq
->r_mtx
);
2068 if (!rq
->r_error
&& !rq
->r_nmrep
.nmc_mhead
&& !(rq
->r_flags
& R_MUSTRESEND
)) {
2069 rq
->r_flags
|= R_MUSTRESEND
;
2072 if ((rq
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
2073 nfs_asyncio_resend(rq
);
2076 lck_mtx_unlock(&rq
->r_mtx
);
2079 lck_mtx_unlock(nfs_request_mutex
);
2084 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2087 nfs_mount_sock_thread(void *arg
, __unused wait_result_t wr
)
2089 struct nfsmount
*nmp
= arg
;
2090 struct timespec ts
= { .tv_sec
= 30, .tv_nsec
= 0 };
2091 thread_t thd
= current_thread();
2094 int error
, dofinish
;
2096 int do_reconnect_sleep
= 0;
2098 lck_mtx_lock(&nmp
->nm_lock
);
2099 while (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
2100 !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
2101 !LIST_EMPTY(&nmp
->nm_monlist
) ||
2102 nmp
->nm_deadto_start
||
2103 (nmp
->nm_state
& NFSSTA_RECOVER
) ||
2104 ((nmp
->nm_vers
>= NFS_VER4
) && !TAILQ_EMPTY(&nmp
->nm_dreturnq
))) {
2105 if (nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) {
2108 /* do reconnect, if necessary */
2109 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2110 if (nmp
->nm_reconnect_start
<= 0) {
2112 nmp
->nm_reconnect_start
= now
.tv_sec
;
2114 lck_mtx_unlock(&nmp
->nm_lock
);
2115 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
2117 * XXX We don't want to call reconnect again right away if returned errors
2118 * before that may not have blocked. This has caused spamming null procs
2119 * from machines in the pass.
2121 if (do_reconnect_sleep
) {
2122 tsleep(nfs_mount_sock_thread
, PSOCK
, "nfs_reconnect_sock_thread_delay", hz
);
2124 error
= nfs_reconnect(nmp
);
2127 if (error
== EIO
|| error
== EINTR
) {
2128 lvl
= (do_reconnect_sleep
++ % 600) ? 7 : 0;
2130 NFS_DBG(NFS_FAC_SOCK
, lvl
, "nfs reconnect %s: returned %d\n",
2131 vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, error
);
2133 nmp
->nm_reconnect_start
= 0;
2134 do_reconnect_sleep
= 0;
2136 lck_mtx_lock(&nmp
->nm_lock
);
2138 if ((nmp
->nm_sockflags
& NMSOCK_READY
) &&
2139 (nmp
->nm_state
& NFSSTA_RECOVER
) &&
2140 !(nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
2141 !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2142 /* perform state recovery */
2143 lck_mtx_unlock(&nmp
->nm_lock
);
2145 lck_mtx_lock(&nmp
->nm_lock
);
2148 /* handle NFSv4 delegation returns */
2149 while ((nmp
->nm_vers
>= NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) &&
2150 (nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
) &&
2151 ((np
= TAILQ_FIRST(&nmp
->nm_dreturnq
)))) {
2152 lck_mtx_unlock(&nmp
->nm_lock
);
2153 nfs4_delegation_return(np
, R_RECOVER
, thd
, nmp
->nm_mcred
);
2154 lck_mtx_lock(&nmp
->nm_lock
);
2157 /* do resends, if necessary/possible */
2158 while ((((nmp
->nm_sockflags
& NMSOCK_READY
) && !(nmp
->nm_state
& NFSSTA_RECOVER
)) ||
2159 (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) &&
2160 ((req
= TAILQ_FIRST(&nmp
->nm_resendq
)))) {
2161 if (req
->r_resendtime
) {
2164 while (req
&& !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) && req
->r_resendtime
&& (now
.tv_sec
< req
->r_resendtime
)) {
2165 req
= TAILQ_NEXT(req
, r_rchain
);
2170 /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
2171 lck_mtx_unlock(&nmp
->nm_lock
);
2172 lck_mtx_lock(&req
->r_mtx
);
2173 lck_mtx_lock(&nmp
->nm_lock
);
2174 if ((req
->r_flags
& R_RESENDQ
) == 0 || (req
->r_rchain
.tqe_next
== NFSREQNOLIST
)) {
2175 lck_mtx_unlock(&req
->r_mtx
);
2178 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
2179 req
->r_flags
&= ~R_RESENDQ
;
2180 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
2181 lck_mtx_unlock(&nmp
->nm_lock
);
2182 /* Note that we have a reference on the request that was taken nfs_asyncio_resend */
2183 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
2184 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
2186 lck_mtx_unlock(&req
->r_mtx
);
2188 nfs_asyncio_finish(req
);
2190 nfs_request_rele(req
);
2191 lck_mtx_lock(&nmp
->nm_lock
);
2194 if ((req
->r_flags
& R_RESTART
) || nfs_request_using_gss(req
)) {
2195 req
->r_flags
&= ~R_RESTART
;
2196 req
->r_resendtime
= 0;
2197 lck_mtx_unlock(&req
->r_mtx
);
2198 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2199 nfs_reqdequeue(req
);
2201 if (nfs_request_using_gss(req
)) {
2202 nfs_gss_clnt_rpcdone(req
);
2203 error
= nfs_gss_clnt_args_restore(req
);
2204 if (error
== ENEEDAUTH
) {
2208 #endif /* CONFIG_NFS_GSS */
2209 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2210 nfs_request_using_gss(req
) ? " gss" : "", req
->r_procnum
, req
->r_xid
,
2211 req
->r_flags
, req
->r_rtt
);
2212 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
2214 error
= nfs_request_add_header(req
);
2217 error
= nfs_request_send(req
, 0);
2219 lck_mtx_lock(&req
->r_mtx
);
2221 req
->r_error
= error
;
2224 dofinish
= error
&& req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
2225 lck_mtx_unlock(&req
->r_mtx
);
2227 nfs_asyncio_finish(req
);
2229 nfs_request_rele(req
);
2230 lck_mtx_lock(&nmp
->nm_lock
);
2234 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2235 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
2236 error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0);
2238 req
->r_flags
|= R_SENDING
;
2239 lck_mtx_unlock(&req
->r_mtx
);
2240 error
= nfs_send(req
, 0);
2241 lck_mtx_lock(&req
->r_mtx
);
2244 lck_mtx_unlock(&req
->r_mtx
);
2245 nfs_request_rele(req
);
2246 lck_mtx_lock(&nmp
->nm_lock
);
2250 req
->r_error
= error
;
2252 dofinish
= req
->r_callback
.rcb_func
&& !(req
->r_flags
& R_WAITSENT
);
2253 lck_mtx_unlock(&req
->r_mtx
);
2255 nfs_asyncio_finish(req
);
2257 nfs_request_rele(req
);
2258 lck_mtx_lock(&nmp
->nm_lock
);
2260 if (nfs_mount_check_dead_timeout(nmp
)) {
2261 nfs_mount_make_zombie(nmp
);
2265 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
2268 /* check monitored nodes, if necessary/possible */
2269 if (!LIST_EMPTY(&nmp
->nm_monlist
)) {
2270 nmp
->nm_state
|= NFSSTA_MONITOR_SCAN
;
2271 LIST_FOREACH(np
, &nmp
->nm_monlist
, n_monlink
) {
2272 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) ||
2273 (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
| NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2276 np
->n_mflag
|= NMMONSCANINPROG
;
2277 lck_mtx_unlock(&nmp
->nm_lock
);
2278 error
= nfs_getattr(np
, NULL
, vfs_context_kernel(), (NGA_UNCACHED
| NGA_MONITOR
));
2279 if (!error
&& ISSET(np
->n_flag
, NUPDATESIZE
)) { /* update quickly to avoid multiple events */
2280 nfs_data_update_size(np
, 0);
2282 lck_mtx_lock(&nmp
->nm_lock
);
2283 np
->n_mflag
&= ~NMMONSCANINPROG
;
2284 if (np
->n_mflag
& NMMONSCANWANT
) {
2285 np
->n_mflag
&= ~NMMONSCANWANT
;
2286 wakeup(&np
->n_mflag
);
2288 if (error
|| !(nmp
->nm_sockflags
& NMSOCK_READY
) ||
2289 (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
| NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2293 nmp
->nm_state
&= ~NFSSTA_MONITOR_SCAN
;
2294 if (nmp
->nm_state
& NFSSTA_UNMOUNTING
) {
2295 wakeup(&nmp
->nm_state
); /* let unmounting thread know scan is done */
2298 if ((nmp
->nm_sockflags
& NMSOCK_READY
) || (nmp
->nm_state
& (NFSSTA_RECOVER
| NFSSTA_UNMOUNTING
))) {
2299 if (nmp
->nm_deadto_start
|| !TAILQ_EMPTY(&nmp
->nm_resendq
) ||
2300 (nmp
->nm_state
& NFSSTA_RECOVER
)) {
2305 msleep(&nmp
->nm_sockthd
, &nmp
->nm_lock
, PSOCK
, "nfssockthread", &ts
);
2309 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2310 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) &&
2311 (nmp
->nm_state
& NFSSTA_MOUNTED
) && NMFLAG(nmp
, CALLUMNT
) &&
2312 (nmp
->nm_vers
< NFS_VER4
) && !(nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
))) {
2313 lck_mtx_unlock(&nmp
->nm_lock
);
2314 nfs3_umount_rpc(nmp
, vfs_context_kernel(),
2315 (nmp
->nm_sockflags
& NMSOCK_READY
) ? 6 : 2);
2316 lck_mtx_lock(&nmp
->nm_lock
);
2319 if (nmp
->nm_sockthd
== thd
) {
2320 nmp
->nm_sockthd
= NULL
;
2322 lck_mtx_unlock(&nmp
->nm_lock
);
2323 wakeup(&nmp
->nm_sockthd
);
2324 thread_terminate(thd
);
2327 /* start or wake a mount's socket thread */
2329 nfs_mount_sock_thread_wake(struct nfsmount
*nmp
)
2331 if (nmp
->nm_sockthd
) {
2332 wakeup(&nmp
->nm_sockthd
);
2333 } else if (kernel_thread_start(nfs_mount_sock_thread
, nmp
, &nmp
->nm_sockthd
) == KERN_SUCCESS
) {
2334 thread_deallocate(nmp
->nm_sockthd
);
2339 * Check if we should mark the mount dead because the
2340 * unresponsive mount has reached the dead timeout.
2341 * (must be called with nmp locked)
2344 nfs_mount_check_dead_timeout(struct nfsmount
*nmp
)
2348 if (nmp
->nm_state
& NFSSTA_DEAD
) {
2351 if (nmp
->nm_deadto_start
== 0) {
2354 nfs_is_squishy(nmp
);
2355 if (nmp
->nm_curdeadtimeout
<= 0) {
2359 if ((now
.tv_sec
- nmp
->nm_deadto_start
) < nmp
->nm_curdeadtimeout
) {
2366 * Call nfs_mount_zombie to remove most of the
2367 * nfs state for the mount, and then ask to be forcibly unmounted.
2369 * Assumes the nfs mount structure lock nm_lock is held.
2373 nfs_mount_make_zombie(struct nfsmount
*nmp
)
2381 if (nmp
->nm_state
& NFSSTA_DEAD
) {
2385 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
2386 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
2387 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
2388 lck_mtx_unlock(&nmp
->nm_lock
);
2389 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
2390 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
2391 lck_mtx_lock(&nmp
->nm_lock
);
2396 * NFS callback channel socket state
2398 struct nfs_callback_socket
{
2399 TAILQ_ENTRY(nfs_callback_socket
) ncbs_link
;
2400 socket_t ncbs_so
; /* the socket */
2401 struct sockaddr_storage ncbs_saddr
; /* socket address */
2402 struct nfs_rpc_record_state ncbs_rrs
; /* RPC record parsing state */
2403 time_t ncbs_stamp
; /* last accessed at */
2404 uint32_t ncbs_flags
; /* see below */
2406 #define NCBSOCK_UPCALL 0x0001
2407 #define NCBSOCK_UPCALLWANT 0x0002
2408 #define NCBSOCK_DEAD 0x0004
2412 * NFS callback channel state
2414 * One listening socket for accepting socket connections from servers and
2415 * a list of connected sockets to handle callback requests on.
2416 * Mounts registered with the callback channel are assigned IDs and
2417 * put on a list so that the callback request handling code can match
2418 * the requests up with mounts.
2420 socket_t nfs4_cb_so
= NULL
;
2421 socket_t nfs4_cb_so6
= NULL
;
2422 in_port_t nfs4_cb_port
= 0;
2423 in_port_t nfs4_cb_port6
= 0;
2424 uint32_t nfs4_cb_id
= 0;
2425 uint32_t nfs4_cb_so_usecount
= 0;
2426 TAILQ_HEAD(nfs4_cb_sock_list
, nfs_callback_socket
) nfs4_cb_socks
;
2427 TAILQ_HEAD(nfs4_cb_mount_list
, nfsmount
) nfs4_cb_mounts
;
2429 int nfs4_cb_handler(struct nfs_callback_socket
*, mbuf_t
);
2432 * Set up the callback channel for the NFS mount.
2434 * Initializes the callback channel socket state and
2435 * assigns a callback ID to the mount.
2438 nfs4_mount_callback_setup(struct nfsmount
*nmp
)
2440 struct sockaddr_in sin
;
2441 struct sockaddr_in6 sin6
;
2443 socket_t so6
= NULL
;
2444 struct timeval timeo
;
2448 lck_mtx_lock(nfs_global_mutex
);
2449 if (nfs4_cb_id
== 0) {
2450 TAILQ_INIT(&nfs4_cb_mounts
);
2451 TAILQ_INIT(&nfs4_cb_socks
);
2454 nmp
->nm_cbid
= nfs4_cb_id
++;
2455 if (nmp
->nm_cbid
== 0) {
2456 nmp
->nm_cbid
= nfs4_cb_id
++;
2458 nfs4_cb_so_usecount
++;
2459 TAILQ_INSERT_HEAD(&nfs4_cb_mounts
, nmp
, nm_cblink
);
2462 lck_mtx_unlock(nfs_global_mutex
);
2467 error
= sock_socket(AF_INET
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so
);
2469 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv4 socket\n", error
);
2474 if (NFS_PORT_INVALID(nfs_callback_port
)) {
2476 log(LOG_INFO
, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error
, nfs_callback_port
);
2480 sock_setsockopt(so
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2481 sin
.sin_len
= sizeof(struct sockaddr_in
);
2482 sin
.sin_family
= AF_INET
;
2483 sin
.sin_addr
.s_addr
= htonl(INADDR_ANY
);
2484 sin
.sin_port
= htons((in_port_t
)nfs_callback_port
); /* try to use specified port */
2485 error
= sock_bind(so
, (struct sockaddr
*)&sin
);
2487 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv4 socket\n", error
);
2490 error
= sock_getsockname(so
, (struct sockaddr
*)&sin
, sin
.sin_len
);
2492 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv4 socket port\n", error
);
2495 nfs4_cb_port
= ntohs(sin
.sin_port
);
2497 error
= sock_listen(so
, 32);
2499 log(LOG_INFO
, "nfs callback setup: error %d on IPv4 listen\n", error
);
2503 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2506 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2508 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error
);
2510 error
= sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2512 log(LOG_INFO
, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error
);
2514 sock_setsockopt(so
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2515 sock_setsockopt(so
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2516 sock_setsockopt(so
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2520 error
= sock_socket(AF_INET6
, SOCK_STREAM
, IPPROTO_TCP
, nfs4_cb_accept
, NULL
, &nfs4_cb_so6
);
2522 log(LOG_INFO
, "nfs callback setup: error %d creating listening IPv6 socket\n", error
);
2527 sock_setsockopt(so6
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2528 sock_setsockopt(so6
, IPPROTO_IPV6
, IPV6_V6ONLY
, &on
, sizeof(on
));
2529 /* try to use specified port or same port as IPv4 */
2530 port
= nfs_callback_port
? (in_port_t
)nfs_callback_port
: nfs4_cb_port
;
2532 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
2533 sin6
.sin6_family
= AF_INET6
;
2534 sin6
.sin6_addr
= in6addr_any
;
2535 sin6
.sin6_port
= htons(port
);
2536 error
= sock_bind(so6
, (struct sockaddr
*)&sin6
);
2538 if (port
!= nfs_callback_port
) {
2539 /* if we simply tried to match the IPv4 port, then try any port */
2541 goto ipv6_bind_again
;
2543 log(LOG_INFO
, "nfs callback setup: error %d binding listening IPv6 socket\n", error
);
2546 error
= sock_getsockname(so6
, (struct sockaddr
*)&sin6
, sin6
.sin6_len
);
2548 log(LOG_INFO
, "nfs callback setup: error %d getting listening IPv6 socket port\n", error
);
2551 nfs4_cb_port6
= ntohs(sin6
.sin6_port
);
2553 error
= sock_listen(so6
, 32);
2555 log(LOG_INFO
, "nfs callback setup: error %d on IPv6 listen\n", error
);
2559 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2562 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2564 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error
);
2566 error
= sock_setsockopt(so6
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2568 log(LOG_INFO
, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error
);
2570 sock_setsockopt(so6
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2571 sock_setsockopt(so6
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2572 sock_setsockopt(so6
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2577 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2578 lck_mtx_unlock(nfs_global_mutex
);
2580 sock_shutdown(so
, SHUT_RDWR
);
2584 sock_shutdown(so6
, SHUT_RDWR
);
2588 lck_mtx_unlock(nfs_global_mutex
);
2593 * Shut down the callback channel for the NFS mount.
2595 * Clears the mount's callback ID and releases the mounts
2596 * reference on the callback socket. Last reference dropped
2597 * will also shut down the callback socket(s).
2600 nfs4_mount_callback_shutdown(struct nfsmount
*nmp
)
2602 struct nfs_callback_socket
*ncbsp
;
2604 struct nfs4_cb_sock_list cb_socks
;
2605 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
2607 lck_mtx_lock(nfs_global_mutex
);
2608 if (nmp
->nm_cbid
== 0) {
2609 lck_mtx_unlock(nfs_global_mutex
);
2612 TAILQ_REMOVE(&nfs4_cb_mounts
, nmp
, nm_cblink
);
2613 /* wait for any callbacks in progress to complete */
2614 while (nmp
->nm_cbrefs
) {
2615 msleep(&nmp
->nm_cbrefs
, nfs_global_mutex
, PSOCK
, "cbshutwait", &ts
);
2618 if (--nfs4_cb_so_usecount
) {
2619 lck_mtx_unlock(nfs_global_mutex
);
2624 nfs4_cb_so
= nfs4_cb_so6
= NULL
;
2625 TAILQ_INIT(&cb_socks
);
2626 TAILQ_CONCAT(&cb_socks
, &nfs4_cb_socks
, ncbs_link
);
2627 lck_mtx_unlock(nfs_global_mutex
);
2629 sock_shutdown(so
, SHUT_RDWR
);
2633 sock_shutdown(so6
, SHUT_RDWR
);
2636 while ((ncbsp
= TAILQ_FIRST(&cb_socks
))) {
2637 TAILQ_REMOVE(&cb_socks
, ncbsp
, ncbs_link
);
2638 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2639 sock_close(ncbsp
->ncbs_so
);
2640 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2641 FREE(ncbsp
, M_TEMP
);
2646 * Check periodically for stale/unused nfs callback sockets
2648 #define NFS4_CB_TIMER_PERIOD 30
2649 #define NFS4_CB_IDLE_MAX 300
2651 nfs4_callback_timer(__unused
void *param0
, __unused
void *param1
)
2653 struct nfs_callback_socket
*ncbsp
, *nextncbsp
;
2657 lck_mtx_lock(nfs_global_mutex
);
2658 if (TAILQ_EMPTY(&nfs4_cb_socks
)) {
2659 nfs4_callback_timer_on
= 0;
2660 lck_mtx_unlock(nfs_global_mutex
);
2664 TAILQ_FOREACH_SAFE(ncbsp
, &nfs4_cb_socks
, ncbs_link
, nextncbsp
) {
2665 if (!(ncbsp
->ncbs_flags
& NCBSOCK_DEAD
) &&
2666 (now
.tv_sec
< (ncbsp
->ncbs_stamp
+ NFS4_CB_IDLE_MAX
))) {
2669 TAILQ_REMOVE(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2670 lck_mtx_unlock(nfs_global_mutex
);
2671 sock_shutdown(ncbsp
->ncbs_so
, SHUT_RDWR
);
2672 sock_close(ncbsp
->ncbs_so
);
2673 nfs_rpc_record_state_cleanup(&ncbsp
->ncbs_rrs
);
2674 FREE(ncbsp
, M_TEMP
);
2677 nfs4_callback_timer_on
= 1;
2678 nfs_interval_timer_start(nfs4_callback_timer_call
,
2679 NFS4_CB_TIMER_PERIOD
* 1000);
2680 lck_mtx_unlock(nfs_global_mutex
);
2684 * Accept a new callback socket.
2687 nfs4_cb_accept(socket_t so
, __unused
void *arg
, __unused
int waitflag
)
2689 socket_t newso
= NULL
;
2690 struct nfs_callback_socket
*ncbsp
;
2691 struct nfsmount
*nmp
;
2692 struct timeval timeo
, now
;
2693 int error
, on
= 1, ip
;
2695 if (so
== nfs4_cb_so
) {
2697 } else if (so
== nfs4_cb_so6
) {
2703 /* allocate/initialize a new nfs_callback_socket */
2704 MALLOC(ncbsp
, struct nfs_callback_socket
*, sizeof(struct nfs_callback_socket
), M_TEMP
, M_WAITOK
);
2706 log(LOG_ERR
, "nfs callback accept: no memory for new socket\n");
2709 bzero(ncbsp
, sizeof(*ncbsp
));
2710 ncbsp
->ncbs_saddr
.ss_len
= (ip
== 4) ? sizeof(struct sockaddr_in
) : sizeof(struct sockaddr_in6
);
2711 nfs_rpc_record_state_init(&ncbsp
->ncbs_rrs
);
2713 /* accept a new socket */
2714 error
= sock_accept(so
, (struct sockaddr
*)&ncbsp
->ncbs_saddr
,
2715 ncbsp
->ncbs_saddr
.ss_len
, MSG_DONTWAIT
,
2716 nfs4_cb_rcv
, ncbsp
, &newso
);
2718 log(LOG_INFO
, "nfs callback accept: error %d accepting IPv%d socket\n", error
, ip
);
2719 FREE(ncbsp
, M_TEMP
);
2723 /* set up the new socket */
2724 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2727 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &timeo
, sizeof(timeo
));
2729 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error
, ip
);
2731 error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &timeo
, sizeof(timeo
));
2733 log(LOG_INFO
, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error
, ip
);
2735 sock_setsockopt(newso
, IPPROTO_TCP
, TCP_NODELAY
, &on
, sizeof(on
));
2736 sock_setsockopt(newso
, SOL_SOCKET
, SO_REUSEADDR
, &on
, sizeof(on
));
2737 sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
));
2738 sock_setsockopt(newso
, SOL_SOCKET
, SO_UPCALLCLOSEWAIT
, &on
, sizeof(on
));
2740 ncbsp
->ncbs_so
= newso
;
2742 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2744 lck_mtx_lock(nfs_global_mutex
);
2746 /* add it to the list */
2747 TAILQ_INSERT_HEAD(&nfs4_cb_socks
, ncbsp
, ncbs_link
);
2749 /* verify it's from a host we have mounted */
2750 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2751 /* check if socket's source address matches this mount's server address */
2752 if (!nmp
->nm_saddr
) {
2755 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0) {
2759 if (!nmp
) { /* we don't want this socket, mark it dead */
2760 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2763 /* make sure the callback socket cleanup timer is running */
2764 /* (shorten the timer if we've got a socket we don't want) */
2765 if (!nfs4_callback_timer_on
) {
2766 nfs4_callback_timer_on
= 1;
2767 nfs_interval_timer_start(nfs4_callback_timer_call
,
2768 !nmp
? 500 : (NFS4_CB_TIMER_PERIOD
* 1000));
2769 } else if (!nmp
&& (nfs4_callback_timer_on
< 2)) {
2770 nfs4_callback_timer_on
= 2;
2771 thread_call_cancel(nfs4_callback_timer_call
);
2772 nfs_interval_timer_start(nfs4_callback_timer_call
, 500);
2775 lck_mtx_unlock(nfs_global_mutex
);
2779 * Receive mbufs from callback sockets into RPC records and process each record.
2780 * Detect connection has been closed and shut down.
2783 nfs4_cb_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
2785 struct nfs_callback_socket
*ncbsp
= arg
;
2786 struct timespec ts
= { .tv_sec
= 1, .tv_nsec
= 0 };
2789 int error
= 0, recv
= 1;
2791 lck_mtx_lock(nfs_global_mutex
);
2792 while (ncbsp
->ncbs_flags
& NCBSOCK_UPCALL
) {
2793 /* wait if upcall is already in progress */
2794 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALLWANT
;
2795 msleep(ncbsp
, nfs_global_mutex
, PSOCK
, "cbupcall", &ts
);
2797 ncbsp
->ncbs_flags
|= NCBSOCK_UPCALL
;
2798 lck_mtx_unlock(nfs_global_mutex
);
2800 /* loop while we make error-free progress */
2801 while (!error
&& recv
) {
2802 error
= nfs_rpc_record_read(so
, &ncbsp
->ncbs_rrs
, MSG_DONTWAIT
, &recv
, &m
);
2803 if (m
) { /* handle the request */
2804 error
= nfs4_cb_handler(ncbsp
, m
);
2808 /* note: no error and no data indicates server closed its end */
2809 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
2811 * Socket is either being closed or should be.
2812 * We can't close the socket in the context of the upcall.
2813 * So we mark it as dead and leave it for the cleanup timer to reap.
2815 ncbsp
->ncbs_stamp
= 0;
2816 ncbsp
->ncbs_flags
|= NCBSOCK_DEAD
;
2819 ncbsp
->ncbs_stamp
= now
.tv_sec
;
2822 lck_mtx_lock(nfs_global_mutex
);
2823 ncbsp
->ncbs_flags
&= ~NCBSOCK_UPCALL
;
2824 lck_mtx_unlock(nfs_global_mutex
);
2829 * Handle an NFS callback channel request.
2832 nfs4_cb_handler(struct nfs_callback_socket
*ncbsp
, mbuf_t mreq
)
2834 socket_t so
= ncbsp
->ncbs_so
;
2835 struct nfsm_chain nmreq
, nmrep
;
2836 mbuf_t mhead
= NULL
, mrest
= NULL
, m
;
2838 struct nfsmount
*nmp
;
2841 nfs_stateid stateid
;
2842 uint32_t bitmap
[NFS_ATTR_BITMAP_LEN
], rbitmap
[NFS_ATTR_BITMAP_LEN
], bmlen
, truncate
, attrbytes
;
2843 uint32_t val
, xid
, procnum
, taglen
, cbid
, numops
, op
, status
;
2844 uint32_t auth_type
, auth_len
;
2845 uint32_t numres
, *pnumres
;
2846 int error
= 0, replen
, len
;
2849 xid
= numops
= op
= status
= procnum
= taglen
= cbid
= 0;
2850 fh
= zalloc(nfs_fhandle_zone
);
2852 nfsm_chain_dissect_init(error
, &nmreq
, mreq
);
2853 nfsm_chain_get_32(error
, &nmreq
, xid
); // RPC XID
2854 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Call
2855 nfsm_assert(error
, (val
== RPC_CALL
), EBADRPC
);
2856 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Version
2857 nfsm_assert(error
, (val
== RPC_VER2
), ERPCMISMATCH
);
2858 nfsm_chain_get_32(error
, &nmreq
, val
); // RPC Program Number
2859 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG
), EPROGUNAVAIL
);
2860 nfsm_chain_get_32(error
, &nmreq
, val
); // NFS Callback Program Version Number
2861 nfsm_assert(error
, (val
== NFS4_CALLBACK_PROG_VERSION
), EPROGMISMATCH
);
2862 nfsm_chain_get_32(error
, &nmreq
, procnum
); // NFS Callback Procedure Number
2863 nfsm_assert(error
, (procnum
<= NFSPROC4_CB_COMPOUND
), EPROCUNAVAIL
);
2865 /* Handle authentication */
2866 /* XXX just ignore auth for now - handling kerberos may be tricky */
2867 nfsm_chain_get_32(error
, &nmreq
, auth_type
); // RPC Auth Flavor
2868 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // RPC Auth Length
2869 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2870 if (!error
&& (auth_len
> 0)) {
2871 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2873 nfsm_chain_adv(error
, &nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
2874 nfsm_chain_get_32(error
, &nmreq
, auth_len
); // verifier length
2875 nfsm_assert(error
, (auth_len
<= RPCAUTH_MAXSIZ
), EBADRPC
);
2876 if (!error
&& (auth_len
> 0)) {
2877 nfsm_chain_adv(error
, &nmreq
, nfsm_rndup(auth_len
));
2886 case NFSPROC4_CB_NULL
:
2887 status
= NFSERR_RETVOID
;
2889 case NFSPROC4_CB_COMPOUND
:
2890 /* tag, minorversion, cb ident, numops, op array */
2891 nfsm_chain_get_32(error
, &nmreq
, taglen
); /* tag length */
2892 nfsm_assert(error
, (val
<= NFS4_OPAQUE_LIMIT
), EBADRPC
);
2894 /* start building the body of the response */
2895 nfsm_mbuf_get(error
, &mrest
, nfsm_rndup(taglen
) + 5 * NFSX_UNSIGNED
);
2896 nfsm_chain_init(&nmrep
, mrest
);
2898 /* copy tag from request to response */
2899 nfsm_chain_add_32(error
, &nmrep
, taglen
); /* tag length */
2900 for (len
= (int)taglen
; !error
&& (len
> 0); len
-= NFSX_UNSIGNED
) {
2901 nfsm_chain_get_32(error
, &nmreq
, val
);
2902 nfsm_chain_add_32(error
, &nmrep
, val
);
2905 /* insert number of results placeholder */
2907 nfsm_chain_add_32(error
, &nmrep
, numres
);
2908 pnumres
= (uint32_t*)(nmrep
.nmc_ptr
- NFSX_UNSIGNED
);
2910 nfsm_chain_get_32(error
, &nmreq
, val
); /* minorversion */
2911 nfsm_assert(error
, (val
== 0), NFSERR_MINOR_VERS_MISMATCH
);
2912 nfsm_chain_get_32(error
, &nmreq
, cbid
); /* callback ID */
2913 nfsm_chain_get_32(error
, &nmreq
, numops
); /* number of operations */
2915 if ((error
== EBADRPC
) || (error
== NFSERR_MINOR_VERS_MISMATCH
)) {
2917 } else if ((error
== ENOBUFS
) || (error
== ENOMEM
)) {
2918 status
= NFSERR_RESOURCE
;
2920 status
= NFSERR_SERVERFAULT
;
2923 nfsm_chain_null(&nmrep
);
2926 /* match the callback ID to a registered mount */
2927 lck_mtx_lock(nfs_global_mutex
);
2928 TAILQ_FOREACH(nmp
, &nfs4_cb_mounts
, nm_cblink
) {
2929 if (nmp
->nm_cbid
!= cbid
) {
2932 /* verify socket's source address matches this mount's server address */
2933 if (!nmp
->nm_saddr
) {
2936 if (nfs_sockaddr_cmp((struct sockaddr
*)&ncbsp
->ncbs_saddr
, nmp
->nm_saddr
) == 0) {
2940 /* mark the NFS mount as busy */
2944 lck_mtx_unlock(nfs_global_mutex
);
2946 /* if no mount match, just drop socket. */
2948 nfsm_chain_null(&nmrep
);
2952 /* process ops, adding results to mrest */
2953 while (numops
> 0) {
2955 nfsm_chain_get_32(error
, &nmreq
, op
);
2960 case NFS_OP_CB_GETATTR
:
2961 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2963 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, fh
);
2964 bmlen
= NFS_ATTR_BITMAP_LEN
;
2965 nfsm_chain_get_bitmap(error
, &nmreq
, bitmap
, bmlen
);
2969 numops
= 0; /* don't process any more ops */
2971 /* find the node for the file handle */
2972 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
->fh_data
, fh
->fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
2974 status
= NFSERR_BADHANDLE
;
2977 numops
= 0; /* don't process any more ops */
2980 nfsm_chain_add_32(error
, &nmrep
, op
);
2981 nfsm_chain_add_32(error
, &nmrep
, status
);
2982 if (!error
&& (status
== EBADRPC
)) {
2986 /* only allow returning size, change, and mtime attrs */
2987 NFS_CLEAR_ATTRIBUTES(&rbitmap
);
2989 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
2990 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_CHANGE
);
2991 attrbytes
+= 2 * NFSX_UNSIGNED
;
2993 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
2994 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_SIZE
);
2995 attrbytes
+= 2 * NFSX_UNSIGNED
;
2997 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
2998 NFS_BITMAP_SET(&rbitmap
, NFS_FATTR_TIME_MODIFY
);
2999 attrbytes
+= 3 * NFSX_UNSIGNED
;
3001 nfsm_chain_add_bitmap(error
, &nmrep
, rbitmap
, NFS_ATTR_BITMAP_LEN
);
3002 nfsm_chain_add_32(error
, &nmrep
, attrbytes
);
3003 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_CHANGE
)) {
3004 nfsm_chain_add_64(error
, &nmrep
,
3005 np
->n_vattr
.nva_change
+ ((np
->n_flag
& NMODIFIED
) ? 1 : 0));
3007 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_SIZE
)) {
3008 nfsm_chain_add_64(error
, &nmrep
, np
->n_size
);
3010 if (NFS_BITMAP_ISSET(&bitmap
, NFS_FATTR_TIME_MODIFY
)) {
3011 nfsm_chain_add_64(error
, &nmrep
, np
->n_vattr
.nva_timesec
[NFSTIME_MODIFY
]);
3012 nfsm_chain_add_32(error
, &nmrep
, np
->n_vattr
.nva_timensec
[NFSTIME_MODIFY
]);
3014 nfs_node_unlock(np
);
3015 vnode_put(NFSTOV(np
));
3019 * If we hit an error building the reply, we can't easily back up.
3020 * So we'll just update the status and hope the server ignores the
3024 case NFS_OP_CB_RECALL
:
3025 // (STATEID, TRUNCATE, FH) -> (STATUS)
3027 nfsm_chain_get_stateid(error
, &nmreq
, &stateid
);
3028 nfsm_chain_get_32(error
, &nmreq
, truncate
);
3029 nfsm_chain_get_fh(error
, &nmreq
, NFS_VER4
, fh
);
3033 numops
= 0; /* don't process any more ops */
3035 /* find the node for the file handle */
3036 error
= nfs_nget(nmp
->nm_mountp
, NULL
, NULL
, fh
->fh_data
, fh
->fh_len
, NULL
, NULL
, RPCAUTH_UNKNOWN
, NG_NOCREATE
, &np
);
3038 status
= NFSERR_BADHANDLE
;
3041 numops
= 0; /* don't process any more ops */
3042 } else if (!(np
->n_openflags
& N_DELEG_MASK
) ||
3043 bcmp(&np
->n_dstateid
, &stateid
, sizeof(stateid
))) {
3044 /* delegation stateid state doesn't match */
3045 status
= NFSERR_BAD_STATEID
;
3046 numops
= 0; /* don't process any more ops */
3048 if (!status
) { /* add node to recall queue, and wake socket thread */
3049 nfs4_delegation_return_enqueue(np
);
3052 nfs_node_unlock(np
);
3053 vnode_put(NFSTOV(np
));
3056 nfsm_chain_add_32(error
, &nmrep
, op
);
3057 nfsm_chain_add_32(error
, &nmrep
, status
);
3058 if (!error
&& (status
== EBADRPC
)) {
3062 case NFS_OP_CB_ILLEGAL
:
3064 nfsm_chain_add_32(error
, &nmrep
, NFS_OP_CB_ILLEGAL
);
3065 status
= NFSERR_OP_ILLEGAL
;
3066 nfsm_chain_add_32(error
, &nmrep
, status
);
3067 numops
= 0; /* don't process any more ops */
3073 if (!status
&& error
) {
3074 if (error
== EBADRPC
) {
3076 } else if ((error
== ENOBUFS
) || (error
== ENOMEM
)) {
3077 status
= NFSERR_RESOURCE
;
3079 status
= NFSERR_SERVERFAULT
;
3084 /* Now, set the numres field */
3085 *pnumres
= txdr_unsigned(numres
);
3086 nfsm_chain_build_done(error
, &nmrep
);
3087 nfsm_chain_null(&nmrep
);
3089 /* drop the callback reference on the mount */
3090 lck_mtx_lock(nfs_global_mutex
);
3092 if (!nmp
->nm_cbid
) {
3093 wakeup(&nmp
->nm_cbrefs
);
3095 lck_mtx_unlock(nfs_global_mutex
);
3100 if (status
== EBADRPC
) {
3101 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
3104 /* build reply header */
3105 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mhead
);
3106 nfsm_chain_init(&nmrep
, mhead
);
3107 nfsm_chain_add_32(error
, &nmrep
, 0); /* insert space for an RPC record mark */
3108 nfsm_chain_add_32(error
, &nmrep
, xid
);
3109 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
3110 if ((status
== ERPCMISMATCH
) || (status
& NFSERR_AUTHERR
)) {
3111 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
3112 if (status
& NFSERR_AUTHERR
) {
3113 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
3114 nfsm_chain_add_32(error
, &nmrep
, (status
& ~NFSERR_AUTHERR
));
3116 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
3117 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
3118 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
3122 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
3123 /* XXX RPCAUTH_NULL verifier */
3124 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
3125 nfsm_chain_add_32(error
, &nmrep
, 0);
3126 /* accepted status */
3129 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
3132 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
3133 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
3134 nfsm_chain_add_32(error
, &nmrep
, NFS4_CALLBACK_PROG_VERSION
);
3137 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
3140 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
3143 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
3144 if (status
!= NFSERR_RETVOID
) {
3145 nfsm_chain_add_32(error
, &nmrep
, status
);
3150 nfsm_chain_build_done(error
, &nmrep
);
3152 nfsm_chain_null(&nmrep
);
3155 error
= mbuf_setnext(nmrep
.nmc_mcur
, mrest
);
3157 printf("nfs cb: mbuf_setnext failed %d\n", error
);
3161 /* Calculate the size of the reply */
3163 for (m
= nmrep
.nmc_mhead
; m
; m
= mbuf_next(m
)) {
3164 replen
+= mbuf_len(m
);
3166 mbuf_pkthdr_setlen(mhead
, replen
);
3167 error
= mbuf_pkthdr_setrcvif(mhead
, NULL
);
3168 nfsm_chain_set_recmark(error
, &nmrep
, (replen
- NFSX_UNSIGNED
) | 0x80000000);
3169 nfsm_chain_null(&nmrep
);
3171 /* send the reply */
3172 bzero(&msg
, sizeof(msg
));
3173 error
= sock_sendmbuf(so
, &msg
, mhead
, 0, &sentlen
);
3175 if (!error
&& ((int)sentlen
!= replen
)) {
3176 error
= EWOULDBLOCK
;
3178 if (error
== EWOULDBLOCK
) { /* inability to send response is considered fatal */
3183 nfsm_chain_cleanup(&nmrep
);
3194 NFS_ZFREE(nfs_fhandle_zone
, fh
);
3197 #endif /* CONFIG_NFS4 */
3200 * Initialize an nfs_rpc_record_state structure.
3203 nfs_rpc_record_state_init(struct nfs_rpc_record_state
*nrrsp
)
3205 bzero(nrrsp
, sizeof(*nrrsp
));
3206 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3210 * Clean up an nfs_rpc_record_state structure.
3213 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state
*nrrsp
)
3215 if (nrrsp
->nrrs_m
) {
3216 mbuf_freem(nrrsp
->nrrs_m
);
3217 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3222 * Read the next (marked) RPC record from the socket.
3224 * *recvp returns if any data was received.
3225 * *mp returns the next complete RPC record
3228 nfs_rpc_record_read(socket_t so
, struct nfs_rpc_record_state
*nrrsp
, int flags
, int *recvp
, mbuf_t
*mp
)
3239 /* read the TCP RPC record marker */
3240 while (!error
&& nrrsp
->nrrs_markerleft
) {
3241 aio
.iov_base
= ((char*)&nrrsp
->nrrs_fragleft
+
3242 sizeof(nrrsp
->nrrs_fragleft
) - nrrsp
->nrrs_markerleft
);
3243 aio
.iov_len
= nrrsp
->nrrs_markerleft
;
3244 bzero(&msg
, sizeof(msg
));
3247 error
= sock_receive(so
, &msg
, flags
, &rcvlen
);
3248 if (error
|| !rcvlen
) {
3252 nrrsp
->nrrs_markerleft
-= rcvlen
;
3253 if (nrrsp
->nrrs_markerleft
) {
3256 /* record marker complete */
3257 nrrsp
->nrrs_fragleft
= ntohl(nrrsp
->nrrs_fragleft
);
3258 if (nrrsp
->nrrs_fragleft
& 0x80000000) {
3259 nrrsp
->nrrs_lastfrag
= 1;
3260 nrrsp
->nrrs_fragleft
&= ~0x80000000;
3262 nrrsp
->nrrs_reclen
+= nrrsp
->nrrs_fragleft
;
3263 if (nrrsp
->nrrs_reclen
> NFS_MAXPACKET
) {
3264 /* This is SERIOUS! We are out of sync with the sender. */
3265 log(LOG_ERR
, "impossible RPC record length (%d) on callback", nrrsp
->nrrs_reclen
);
3270 /* read the TCP RPC record fragment */
3271 while (!error
&& !nrrsp
->nrrs_markerleft
&& nrrsp
->nrrs_fragleft
) {
3273 rcvlen
= nrrsp
->nrrs_fragleft
;
3274 error
= sock_receivembuf(so
, NULL
, &m
, flags
, &rcvlen
);
3275 if (error
|| !rcvlen
|| !m
) {
3279 /* append mbufs to list */
3280 nrrsp
->nrrs_fragleft
-= rcvlen
;
3281 if (!nrrsp
->nrrs_m
) {
3284 error
= mbuf_setnext(nrrsp
->nrrs_mlast
, m
);
3286 printf("nfs tcp rcv: mbuf_setnext failed %d\n", error
);
3291 while (mbuf_next(m
)) {
3294 nrrsp
->nrrs_mlast
= m
;
3297 /* done reading fragment? */
3298 if (!error
&& !nrrsp
->nrrs_markerleft
&& !nrrsp
->nrrs_fragleft
) {
3299 /* reset socket fragment parsing state */
3300 nrrsp
->nrrs_markerleft
= sizeof(nrrsp
->nrrs_fragleft
);
3301 if (nrrsp
->nrrs_lastfrag
) {
3302 /* RPC record complete */
3303 *mp
= nrrsp
->nrrs_m
;
3304 /* reset socket record parsing state */
3305 nrrsp
->nrrs_reclen
= 0;
3306 nrrsp
->nrrs_m
= nrrsp
->nrrs_mlast
= NULL
;
3307 nrrsp
->nrrs_lastfrag
= 0;
3317 * The NFS client send routine.
3319 * Send the given NFS request out the mount's socket.
3320 * Holds nfs_sndlock() for the duration of this call.
3322 * - check for request termination (sigintr)
3323 * - wait for reconnect, if necessary
3324 * - UDP: check the congestion window
3325 * - make a copy of the request to send
3326 * - UDP: update the congestion window
3327 * - send the request
3329 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3330 * rexmit count is also updated if this isn't the first send.
3332 * If the send is not successful, make sure R_MUSTRESEND is set.
3333 * If this wasn't the first transmit, set R_RESENDERR.
3334 * Also, undo any UDP congestion window changes made.
3336 * If the error appears to indicate that the socket should
3337 * be reconnected, mark the socket for reconnection.
3339 * Only return errors when the request should be aborted.
3342 nfs_send(struct nfsreq
*req
, int wait
)
3344 struct nfsmount
*nmp
;
3345 struct nfs_socket
*nso
;
3346 int error
, error2
, sotype
, rexmit
, slpflag
= 0, needrecon
;
3348 struct sockaddr
*sendnam
;
3351 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3354 error
= nfs_sndlock(req
);
3356 lck_mtx_lock(&req
->r_mtx
);
3357 req
->r_error
= error
;
3358 req
->r_flags
&= ~R_SENDING
;
3359 lck_mtx_unlock(&req
->r_mtx
);
3363 error
= nfs_sigintr(req
->r_nmp
, req
, NULL
, 0);
3366 lck_mtx_lock(&req
->r_mtx
);
3367 req
->r_error
= error
;
3368 req
->r_flags
&= ~R_SENDING
;
3369 lck_mtx_unlock(&req
->r_mtx
);
3373 sotype
= nmp
->nm_sotype
;
3376 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3377 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3379 if (((req
->r_flags
& R_SETUP
) && !(nmp
->nm_sockflags
& NMSOCK_SETUP
)) ||
3380 ((req
->r_flags
& R_RECOVER
) && !(nmp
->nm_sockflags
& NMSOCK_READY
))) {
3383 lck_mtx_lock(&req
->r_mtx
);
3384 req
->r_error
= error
;
3385 req
->r_flags
&= ~R_SENDING
;
3386 lck_mtx_unlock(&req
->r_mtx
);
3390 /* If the socket needs reconnection, do that now. */
3391 /* wait until socket is ready - unless this request is part of setup */
3392 lck_mtx_lock(&nmp
->nm_lock
);
3393 if (!(nmp
->nm_sockflags
& NMSOCK_READY
) &&
3394 !((nmp
->nm_sockflags
& NMSOCK_SETUP
) && (req
->r_flags
& R_SETUP
))) {
3395 if (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) {
3398 lck_mtx_unlock(&nmp
->nm_lock
);
3401 lck_mtx_lock(&req
->r_mtx
);
3402 req
->r_flags
&= ~R_SENDING
;
3403 req
->r_flags
|= R_MUSTRESEND
;
3405 lck_mtx_unlock(&req
->r_mtx
);
3408 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req
->r_xid
);
3409 lck_mtx_lock(&req
->r_mtx
);
3410 req
->r_flags
&= ~R_MUSTRESEND
;
3412 lck_mtx_unlock(&req
->r_mtx
);
3413 lck_mtx_lock(&nmp
->nm_lock
);
3414 while (!(nmp
->nm_sockflags
& NMSOCK_READY
)) {
3415 /* don't bother waiting if the socket thread won't be reconnecting it */
3416 if (nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) {
3420 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (nmp
->nm_reconnect_start
> 0)) {
3423 if ((now
.tv_sec
- nmp
->nm_reconnect_start
) >= 8) {
3424 /* soft mount in reconnect for a while... terminate ASAP */
3425 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
3426 req
->r_flags
|= R_SOFTTERM
;
3427 req
->r_error
= error
= ETIMEDOUT
;
3431 /* make sure socket thread is running, then wait */
3432 nfs_mount_sock_thread_wake(nmp
);
3433 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3436 msleep(req
, &nmp
->nm_lock
, slpflag
| PSOCK
, "nfsconnectwait", &ts
);
3439 lck_mtx_unlock(&nmp
->nm_lock
);
3441 lck_mtx_lock(&req
->r_mtx
);
3442 req
->r_error
= error
;
3443 req
->r_flags
&= ~R_SENDING
;
3444 lck_mtx_unlock(&req
->r_mtx
);
3450 /* note that we're using the mount's socket to do the send */
3451 nmp
->nm_state
|= NFSSTA_SENDING
; /* will be cleared by nfs_sndunlock() */
3452 lck_mtx_unlock(&nmp
->nm_lock
);
3455 lck_mtx_lock(&req
->r_mtx
);
3456 req
->r_flags
&= ~R_SENDING
;
3457 req
->r_flags
|= R_MUSTRESEND
;
3459 lck_mtx_unlock(&req
->r_mtx
);
3463 lck_mtx_lock(&req
->r_mtx
);
3464 rexmit
= (req
->r_flags
& R_SENT
);
3466 if (sotype
== SOCK_DGRAM
) {
3467 lck_mtx_lock(&nmp
->nm_lock
);
3468 if (!(req
->r_flags
& R_CWND
) && (nmp
->nm_sent
>= nmp
->nm_cwnd
)) {
3469 /* if we can't send this out yet, wait on the cwnd queue */
3470 slpflag
= (NMFLAG(nmp
, INTR
) && req
->r_thread
) ? PCATCH
: 0;
3471 lck_mtx_unlock(&nmp
->nm_lock
);
3473 req
->r_flags
&= ~R_SENDING
;
3474 req
->r_flags
|= R_MUSTRESEND
;
3475 lck_mtx_unlock(&req
->r_mtx
);
3480 lck_mtx_lock(&nmp
->nm_lock
);
3481 while (nmp
->nm_sent
>= nmp
->nm_cwnd
) {
3482 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 1))) {
3485 TAILQ_INSERT_TAIL(&nmp
->nm_cwndq
, req
, r_cchain
);
3486 msleep(req
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfswaitcwnd", &ts
);
3488 if ((req
->r_cchain
.tqe_next
!= NFSREQNOLIST
)) {
3489 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
3490 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3493 lck_mtx_unlock(&nmp
->nm_lock
);
3497 * We update these *before* the send to avoid racing
3498 * against others who may be looking to send requests.
3501 /* first transmit */
3502 req
->r_flags
|= R_CWND
;
3503 nmp
->nm_sent
+= NFS_CWNDSCALE
;
3506 * When retransmitting, turn timing off
3507 * and divide congestion window by 2.
3509 req
->r_flags
&= ~R_TIMING
;
3511 if (nmp
->nm_cwnd
< NFS_CWNDSCALE
) {
3512 nmp
->nm_cwnd
= NFS_CWNDSCALE
;
3515 lck_mtx_unlock(&nmp
->nm_lock
);
3518 req
->r_flags
&= ~R_MUSTRESEND
;
3519 lck_mtx_unlock(&req
->r_mtx
);
3521 error
= mbuf_copym(req
->r_mhead
, 0, MBUF_COPYALL
,
3522 wait
? MBUF_WAITOK
: MBUF_DONTWAIT
, &mreqcopy
);
3525 log(LOG_INFO
, "nfs_send: mbuf copy failed %d\n", error
);
3528 lck_mtx_lock(&req
->r_mtx
);
3529 req
->r_flags
&= ~R_SENDING
;
3530 req
->r_flags
|= R_MUSTRESEND
;
3532 lck_mtx_unlock(&req
->r_mtx
);
3536 bzero(&msg
, sizeof(msg
));
3537 if ((sotype
!= SOCK_STREAM
) && !sock_isconnected(nso
->nso_so
) && ((sendnam
= nmp
->nm_saddr
))) {
3538 msg
.msg_name
= (caddr_t
)sendnam
;
3539 msg
.msg_namelen
= sendnam
->sa_len
;
3541 NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy
);
3542 error
= sock_sendmbuf(nso
->nso_so
, &msg
, mreqcopy
, 0, &sentlen
);
3543 if (error
|| (sentlen
!= req
->r_mreqlen
)) {
3544 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
3545 req
->r_xid
, (int)sentlen
, (int)req
->r_mreqlen
, error
);
3548 if (!error
&& (sentlen
!= req
->r_mreqlen
)) {
3549 error
= EWOULDBLOCK
;
3551 needrecon
= ((sotype
== SOCK_STREAM
) && sentlen
&& (sentlen
!= req
->r_mreqlen
));
3553 lck_mtx_lock(&req
->r_mtx
);
3554 req
->r_flags
&= ~R_SENDING
;
3556 if (rexmit
&& (++req
->r_rexmit
> NFS_MAXREXMIT
)) {
3557 req
->r_rexmit
= NFS_MAXREXMIT
;
3562 req
->r_flags
&= ~R_RESENDERR
;
3564 OSAddAtomic64(1, &nfsstats
.rpcretries
);
3566 req
->r_flags
|= R_SENT
;
3567 if (req
->r_flags
& R_WAITSENT
) {
3568 req
->r_flags
&= ~R_WAITSENT
;
3572 lck_mtx_unlock(&req
->r_mtx
);
3577 req
->r_flags
|= R_MUSTRESEND
;
3579 req
->r_flags
|= R_RESENDERR
;
3581 if ((error
== EINTR
) || (error
== ERESTART
)) {
3582 req
->r_error
= error
;
3584 lck_mtx_unlock(&req
->r_mtx
);
3586 if (sotype
== SOCK_DGRAM
) {
3588 * Note: even though a first send may fail, we consider
3589 * the request sent for congestion window purposes.
3590 * So we don't need to undo any of the changes made above.
3593 * Socket errors ignored for connectionless sockets??
3594 * For now, ignore them all
3596 if ((error
!= EINTR
) && (error
!= ERESTART
) &&
3597 (error
!= EWOULDBLOCK
) && (error
!= EIO
) && (nso
== nmp
->nm_nso
)) {
3598 int clearerror
= 0, optlen
= sizeof(clearerror
);
3599 sock_getsockopt(nso
->nso_so
, SOL_SOCKET
, SO_ERROR
, &clearerror
, &optlen
);
3600 #ifdef NFS_SOCKET_DEBUGGING
3602 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
3609 /* check if it appears we should reconnect the socket */
3612 /* if send timed out, reconnect if on TCP */
3613 if (sotype
!= SOCK_STREAM
) {
3629 /* case ECANCELED??? */
3633 if (needrecon
&& (nso
== nmp
->nm_nso
)) { /* mark socket as needing reconnect */
3634 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req
->r_xid
, error
);
3635 nfs_need_reconnect(nmp
);
3640 if (nfs_is_dead(error
, nmp
)) {
3645 * Don't log some errors:
3646 * EPIPE errors may be common with servers that drop idle connections.
3647 * EADDRNOTAVAIL may occur on network transitions.
3648 * ENOTCONN may occur under some network conditions.
3650 if ((error
== EPIPE
) || (error
== EADDRNOTAVAIL
) || (error
== ENOTCONN
)) {
3653 if (error
&& (error
!= EINTR
) && (error
!= ERESTART
)) {
3654 log(LOG_INFO
, "nfs send error %d for server %s\n", error
,
3655 !req
->r_nmp
? "<unmounted>" :
3656 vfs_statfs(req
->r_nmp
->nm_mountp
)->f_mntfromname
);
3659 /* prefer request termination error over other errors */
3660 error2
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0);
3665 /* only allow the following errors to be returned */
3666 if ((error
!= EINTR
) && (error
!= ERESTART
) && (error
!= EIO
) &&
3667 (error
!= ENXIO
) && (error
!= ETIMEDOUT
)) {
3669 * We got some error we don't know what do do with,
3670 * i.e., we're not reconnecting, we map it to
3671 * EIO. Presumably our send failed and we better tell
3672 * the caller so they don't wait for a reply that is
3673 * never going to come. If we are reconnecting we
3674 * return 0 and the request will be resent.
3676 error
= needrecon
? 0 : EIO
;
3682 * NFS client socket upcalls
3684 * Pull RPC replies out of an NFS mount's socket and match them
3685 * up with the pending request.
3687 * The datagram code is simple because we always get whole
3688 * messages out of the socket.
3690 * The stream code is more involved because we have to parse
3691 * the RPC records out of the stream.
3694 /* NFS client UDP socket upcall */
3696 nfs_udp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3698 struct nfsmount
*nmp
= arg
;
3699 struct nfs_socket
*nso
= nmp
->nm_nso
;
3704 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3709 /* make sure we're on the current socket */
3710 if (!nso
|| (nso
->nso_so
!= so
)) {
3716 error
= sock_receivembuf(so
, NULL
, &m
, MSG_DONTWAIT
, &rcvlen
);
3718 nfs_request_match_reply(nmp
, m
);
3720 } while (m
&& !error
);
3722 if (error
&& (error
!= EWOULDBLOCK
)) {
3723 /* problems with the socket... mark for reconnection */
3724 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error
);
3725 nfs_need_reconnect(nmp
);
3729 /* NFS client TCP socket upcall */
3731 nfs_tcp_rcv(socket_t so
, void *arg
, __unused
int waitflag
)
3733 struct nfsmount
*nmp
= arg
;
3734 struct nfs_socket
*nso
= nmp
->nm_nso
;
3735 struct nfs_rpc_record_state nrrs
;
3741 if (nmp
->nm_sockflags
& NMSOCK_CONNECTING
) {
3745 /* make sure we're on the current socket */
3746 lck_mtx_lock(&nmp
->nm_lock
);
3748 if (!nso
|| (nso
->nso_so
!= so
) || (nmp
->nm_sockflags
& (NMSOCK_DISCONNECTING
))) {
3749 lck_mtx_unlock(&nmp
->nm_lock
);
3752 lck_mtx_unlock(&nmp
->nm_lock
);
3754 /* make sure this upcall should be trying to do work */
3755 lck_mtx_lock(&nso
->nso_lock
);
3756 if (nso
->nso_flags
& (NSO_UPCALL
| NSO_DISCONNECTING
| NSO_DEAD
)) {
3757 lck_mtx_unlock(&nso
->nso_lock
);
3760 nso
->nso_flags
|= NSO_UPCALL
;
3761 nrrs
= nso
->nso_rrs
;
3762 lck_mtx_unlock(&nso
->nso_lock
);
3764 /* loop while we make error-free progress */
3765 while (!error
&& recv
) {
3766 error
= nfs_rpc_record_read(so
, &nrrs
, MSG_DONTWAIT
, &recv
, &m
);
3767 if (m
) { /* match completed response with request */
3768 nfs_request_match_reply(nmp
, m
);
3772 /* Update the sockets's rpc parsing state */
3773 lck_mtx_lock(&nso
->nso_lock
);
3774 nso
->nso_rrs
= nrrs
;
3775 if (nso
->nso_flags
& NSO_DISCONNECTING
) {
3778 nso
->nso_flags
&= ~NSO_UPCALL
;
3779 lck_mtx_unlock(&nso
->nso_lock
);
3781 wakeup(&nso
->nso_flags
);
3784 #ifdef NFS_SOCKET_DEBUGGING
3785 if (!recv
&& (error
!= EWOULDBLOCK
)) {
3786 NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error
);
3789 /* note: no error and no data indicates server closed its end */
3790 if ((error
!= EWOULDBLOCK
) && (error
|| !recv
)) {
3791 /* problems with the socket... mark for reconnection */
3792 NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error
);
3793 nfs_need_reconnect(nmp
);
3798 * "poke" a socket to try to provoke any pending errors
3801 nfs_sock_poke(struct nfsmount
*nmp
)
3809 lck_mtx_lock(&nmp
->nm_lock
);
3810 if ((nmp
->nm_sockflags
& NMSOCK_UNMOUNT
) ||
3811 !(nmp
->nm_sockflags
& NMSOCK_READY
) || !nmp
->nm_nso
|| !nmp
->nm_nso
->nso_so
) {
3812 /* Nothing to poke */
3813 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3814 wakeup(&nmp
->nm_sockflags
);
3815 lck_mtx_unlock(&nmp
->nm_lock
);
3818 lck_mtx_unlock(&nmp
->nm_lock
);
3819 aio
.iov_base
= &dummy
;
3822 bzero(&msg
, sizeof(msg
));
3825 error
= sock_send(nmp
->nm_nso
->nso_so
, &msg
, MSG_DONTWAIT
, &len
);
3826 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error
);
3827 lck_mtx_lock(&nmp
->nm_lock
);
3828 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
3829 wakeup(&nmp
->nm_sockflags
);
3830 lck_mtx_unlock(&nmp
->nm_lock
);
3831 nfs_is_dead(error
, nmp
);
3835 * Match an RPC reply with the corresponding request
3838 nfs_request_match_reply(struct nfsmount
*nmp
, mbuf_t mrep
)
3841 struct nfsm_chain nmrep
;
3842 u_int32_t reply
= 0, rxid
= 0;
3843 int error
= 0, asyncioq
, t1
;
3845 bzero(&nmrep
, sizeof(nmrep
));
3846 /* Get the xid and check that it is an rpc reply */
3847 nfsm_chain_dissect_init(error
, &nmrep
, mrep
);
3848 nfsm_chain_get_32(error
, &nmrep
, rxid
);
3849 nfsm_chain_get_32(error
, &nmrep
, reply
);
3850 if (error
|| (reply
!= RPC_REPLY
)) {
3851 OSAddAtomic64(1, &nfsstats
.rpcinvalid
);
3857 * Loop through the request list to match up the reply
3858 * Iff no match, just drop it.
3860 lck_mtx_lock(nfs_request_mutex
);
3861 TAILQ_FOREACH(req
, &nfs_reqq
, r_chain
) {
3862 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3865 /* looks like we have it, grab lock and double check */
3866 lck_mtx_lock(&req
->r_mtx
);
3867 if (req
->r_nmrep
.nmc_mhead
|| (rxid
!= R_XID32(req
->r_xid
))) {
3868 lck_mtx_unlock(&req
->r_mtx
);
3872 req
->r_nmrep
= nmrep
;
3873 lck_mtx_lock(&nmp
->nm_lock
);
3874 if (nmp
->nm_sotype
== SOCK_DGRAM
) {
3876 * Update congestion window.
3877 * Do the additive increase of one rpc/rtt.
3879 FSDBG(530, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
3880 if (nmp
->nm_cwnd
<= nmp
->nm_sent
) {
3882 ((NFS_CWNDSCALE
* NFS_CWNDSCALE
) +
3883 (nmp
->nm_cwnd
>> 1)) / nmp
->nm_cwnd
;
3884 if (nmp
->nm_cwnd
> NFS_MAXCWND
) {
3885 nmp
->nm_cwnd
= NFS_MAXCWND
;
3888 if (req
->r_flags
& R_CWND
) {
3889 nmp
->nm_sent
-= NFS_CWNDSCALE
;
3890 req
->r_flags
&= ~R_CWND
;
3892 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
3893 /* congestion window is open, poke the cwnd queue */
3894 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
3895 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
3896 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
3901 * Update rtt using a gain of 0.125 on the mean
3902 * and a gain of 0.25 on the deviation.
3904 if (req
->r_flags
& R_TIMING
) {
3906 * Since the timer resolution of
3907 * NFS_HZ is so course, it can often
3908 * result in r_rtt == 0. Since
3909 * r_rtt == N means that the actual
3910 * rtt is between N+dt and N+2-dt ticks,
3913 if (proct
[req
->r_procnum
] == 0) {
3914 panic("nfs_request_match_reply: proct[%d] is zero", req
->r_procnum
);
3916 t1
= req
->r_rtt
+ 1;
3917 t1
-= (NFS_SRTT(req
) >> 3);
3918 NFS_SRTT(req
) += t1
;
3922 t1
-= (NFS_SDRTT(req
) >> 2);
3923 NFS_SDRTT(req
) += t1
;
3925 nmp
->nm_timeouts
= 0;
3926 lck_mtx_unlock(&nmp
->nm_lock
);
3927 /* signal anyone waiting on this request */
3929 asyncioq
= (req
->r_callback
.rcb_func
!= NULL
);
3931 if (nfs_request_using_gss(req
)) {
3932 nfs_gss_clnt_rpcdone(req
);
3934 #endif /* CONFIG_NFS_GSS */
3935 lck_mtx_unlock(&req
->r_mtx
);
3936 lck_mtx_unlock(nfs_request_mutex
);
3937 /* if it's an async RPC with a callback, queue it up */
3939 nfs_asyncio_finish(req
);
3945 /* not matched to a request, so drop it. */
3946 lck_mtx_unlock(nfs_request_mutex
);
3947 OSAddAtomic64(1, &nfsstats
.rpcunexpected
);
3953 * Wait for the reply for a given request...
3954 * ...potentially resending the request if necessary.
3957 nfs_wait_reply(struct nfsreq
*req
)
3959 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
3960 int error
= 0, slpflag
, first
= 1;
3962 if (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
3968 lck_mtx_lock(&req
->r_mtx
);
3969 while (!req
->r_nmrep
.nmc_mhead
) {
3970 if ((error
= nfs_sigintr(req
->r_nmp
, req
, first
? NULL
: req
->r_thread
, 0))) {
3973 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
3976 /* check if we need to resend */
3977 if (req
->r_flags
& R_MUSTRESEND
) {
3978 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3979 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
3980 req
->r_flags
|= R_SENDING
;
3981 lck_mtx_unlock(&req
->r_mtx
);
3982 if (nfs_request_using_gss(req
)) {
3984 * It's an RPCSEC_GSS request.
3985 * Can't just resend the original request
3986 * without bumping the cred sequence number.
3987 * Go back and re-build the request.
3989 lck_mtx_lock(&req
->r_mtx
);
3990 req
->r_flags
&= ~R_SENDING
;
3991 lck_mtx_unlock(&req
->r_mtx
);
3994 error
= nfs_send(req
, 1);
3995 lck_mtx_lock(&req
->r_mtx
);
3996 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3997 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
, error
);
4001 if (((error
= req
->r_error
)) || req
->r_nmrep
.nmc_mhead
) {
4005 /* need to poll if we're P_NOREMOTEHANG */
4006 if (nfs_noremotehang(req
->r_thread
)) {
4009 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitreply", &ts
);
4010 first
= slpflag
= 0;
4012 lck_mtx_unlock(&req
->r_mtx
);
4018 * An NFS request goes something like this:
4019 * (nb: always frees up mreq mbuf list)
4020 * nfs_request_create()
4021 * - allocates a request struct if one is not provided
4022 * - initial fill-in of the request struct
4023 * nfs_request_add_header()
4024 * - add the RPC header
4025 * nfs_request_send()
4026 * - link it into list
4027 * - call nfs_send() for first transmit
4028 * nfs_request_wait()
4029 * - call nfs_wait_reply() to wait for the reply
4030 * nfs_request_finish()
4031 * - break down rpc header and return with error or nfs reply
4032 * pointed to by nmrep.
4033 * nfs_request_rele()
4034 * nfs_request_destroy()
4035 * - clean up the request struct
4036 * - free the request struct if it was allocated by nfs_request_create()
4040 * Set up an NFS request struct (allocating if no request passed in).
4045 mount_t mp
, /* used only if !np */
4046 struct nfsm_chain
*nmrest
,
4050 struct nfsreq
**reqp
)
4052 struct nfsreq
*req
, *newreq
= NULL
;
4053 struct nfsmount
*nmp
;
4057 /* allocate a new NFS request structure */
4058 req
= newreq
= zalloc_flags(nfs_req_zone
, Z_WAITOK
| Z_ZERO
);
4060 bzero(req
, sizeof(*req
));
4062 if (req
== newreq
) {
4063 req
->r_flags
= R_ALLOCATED
;
4066 nmp
= VFSTONFS(np
? NFSTOMP(np
) : mp
);
4067 if (nfs_mount_gone(nmp
)) {
4069 NFS_ZFREE(nfs_req_zone
, newreq
);
4073 lck_mtx_lock(&nmp
->nm_lock
);
4074 if ((nmp
->nm_state
& (NFSSTA_FORCE
| NFSSTA_DEAD
)) &&
4075 (nmp
->nm_state
& NFSSTA_TIMEO
)) {
4076 lck_mtx_unlock(&nmp
->nm_lock
);
4077 mbuf_freem(nmrest
->nmc_mhead
);
4078 nmrest
->nmc_mhead
= NULL
;
4080 NFS_ZFREE(nfs_req_zone
, newreq
);
4085 if ((nmp
->nm_vers
!= NFS_VER4
) && (procnum
>= 0) && (procnum
< NFS_NPROCS
)) {
4086 OSAddAtomic64(1, &nfsstats
.rpccnt
[procnum
]);
4088 if ((nmp
->nm_vers
== NFS_VER4
) && (procnum
!= NFSPROC4_COMPOUND
) && (procnum
!= NFSPROC4_NULL
)) {
4089 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum
);
4092 lck_mtx_init(&req
->r_mtx
, nfs_request_grp
, LCK_ATTR_NULL
);
4096 req
->r_thread
= thd
;
4098 req
->r_flags
|= R_NOINTR
;
4100 if (IS_VALID_CRED(cred
)) {
4101 kauth_cred_ref(cred
);
4104 req
->r_procnum
= procnum
;
4105 if (proct
[procnum
] > 0) {
4106 req
->r_flags
|= R_TIMING
;
4108 req
->r_nmrep
.nmc_mhead
= NULL
;
4109 SLIST_INIT(&req
->r_gss_seqlist
);
4110 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
4111 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4112 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4114 /* set auth flavor to use for request */
4116 req
->r_auth
= RPCAUTH_NONE
;
4117 } else if (req
->r_np
&& (req
->r_np
->n_auth
!= RPCAUTH_INVALID
)) {
4118 req
->r_auth
= req
->r_np
->n_auth
;
4120 req
->r_auth
= nmp
->nm_auth
;
4123 lck_mtx_unlock(&nmp
->nm_lock
);
4125 /* move the request mbuf chain to the nfsreq */
4126 req
->r_mrest
= nmrest
->nmc_mhead
;
4127 nmrest
->nmc_mhead
= NULL
;
4129 req
->r_flags
|= R_INITTED
;
4138 * Clean up and free an NFS request structure.
4141 nfs_request_destroy(struct nfsreq
*req
)
4143 struct nfsmount
*nmp
;
4144 int clearjbtimeo
= 0;
4147 struct gss_seq
*gsp
, *ngsp
;
4150 if (!req
|| !(req
->r_flags
& R_INITTED
)) {
4154 req
->r_flags
&= ~R_INITTED
;
4155 if (req
->r_lflags
& RL_QUEUED
) {
4156 nfs_reqdequeue(req
);
4159 if (req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
4161 * Still on an async I/O queue?
4162 * %%% But which one, we may be on a local iod.
4164 lck_mtx_lock(nfsiod_mutex
);
4165 if (nmp
&& req
->r_achain
.tqe_next
!= NFSREQNOLIST
) {
4166 TAILQ_REMOVE(&nmp
->nm_iodq
, req
, r_achain
);
4167 req
->r_achain
.tqe_next
= NFSREQNOLIST
;
4169 lck_mtx_unlock(nfsiod_mutex
);
4172 lck_mtx_lock(&req
->r_mtx
);
4174 lck_mtx_lock(&nmp
->nm_lock
);
4175 if (req
->r_flags
& R_CWND
) {
4176 /* Decrement the outstanding request count. */
4177 req
->r_flags
&= ~R_CWND
;
4178 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4179 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4180 /* congestion window is open, poke the cwnd queue */
4181 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4182 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4183 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4187 /* XXX should we just remove this conditional, we should have a reference if we're resending */
4188 if ((req
->r_flags
& R_RESENDQ
) && req
->r_rchain
.tqe_next
!= NFSREQNOLIST
) {
4189 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
4190 req
->r_flags
&= ~R_RESENDQ
;
4191 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
4193 if (req
->r_cchain
.tqe_next
!= NFSREQNOLIST
) {
4194 TAILQ_REMOVE(&nmp
->nm_cwndq
, req
, r_cchain
);
4195 req
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4197 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4198 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4200 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4202 lck_mtx_unlock(&nmp
->nm_lock
);
4204 lck_mtx_unlock(&req
->r_mtx
);
4207 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4210 mbuf_freem(req
->r_mhead
);
4211 } else if (req
->r_mrest
) {
4212 mbuf_freem(req
->r_mrest
);
4214 if (req
->r_nmrep
.nmc_mhead
) {
4215 mbuf_freem(req
->r_nmrep
.nmc_mhead
);
4217 if (IS_VALID_CRED(req
->r_cred
)) {
4218 kauth_cred_unref(&req
->r_cred
);
4221 if (nfs_request_using_gss(req
)) {
4222 nfs_gss_clnt_rpcdone(req
);
4224 SLIST_FOREACH_SAFE(gsp
, &req
->r_gss_seqlist
, gss_seqnext
, ngsp
)
4226 if (req
->r_gss_ctx
) {
4227 nfs_gss_clnt_ctx_unref(req
);
4229 #endif /* CONFIG_NFS_GSS */
4230 if (req
->r_wrongsec
) {
4231 FREE(req
->r_wrongsec
, M_TEMP
);
4234 nfs_mount_rele(nmp
);
4236 lck_mtx_destroy(&req
->r_mtx
, nfs_request_grp
);
4237 if (req
->r_flags
& R_ALLOCATED
) {
4238 NFS_ZFREE(nfs_req_zone
, req
);
4243 nfs_request_ref(struct nfsreq
*req
, int locked
)
4246 lck_mtx_lock(&req
->r_mtx
);
4248 if (req
->r_refs
<= 0) {
4249 panic("nfsreq reference error");
4253 lck_mtx_unlock(&req
->r_mtx
);
4258 nfs_request_rele(struct nfsreq
*req
)
4262 lck_mtx_lock(&req
->r_mtx
);
4263 if (req
->r_refs
<= 0) {
4264 panic("nfsreq reference underflow");
4267 destroy
= (req
->r_refs
== 0);
4268 lck_mtx_unlock(&req
->r_mtx
);
4270 nfs_request_destroy(req
);
4276 * Add an (updated) RPC header with authorization to an NFS request.
4279 nfs_request_add_header(struct nfsreq
*req
)
4281 struct nfsmount
*nmp
;
4285 /* free up any previous header */
4286 if ((m
= req
->r_mhead
)) {
4287 while (m
&& (m
!= req
->r_mrest
)) {
4290 req
->r_mhead
= NULL
;
4294 if (nfs_mount_gone(nmp
)) {
4298 error
= nfsm_rpchead(req
, req
->r_mrest
, &req
->r_xid
, &req
->r_mhead
);
4303 req
->r_mreqlen
= mbuf_pkthdr_len(req
->r_mhead
);
4305 if (nfs_mount_gone(nmp
)) {
4308 lck_mtx_lock(&nmp
->nm_lock
);
4309 if (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) {
4310 req
->r_retry
= nmp
->nm_retry
;
4312 req
->r_retry
= NFS_MAXREXMIT
+ 1; /* past clip limit */
4314 lck_mtx_unlock(&nmp
->nm_lock
);
4321 * Queue an NFS request up and send it out.
4324 nfs_request_send(struct nfsreq
*req
, int wait
)
4326 struct nfsmount
*nmp
;
4329 lck_mtx_lock(&req
->r_mtx
);
4330 req
->r_flags
|= R_SENDING
;
4331 lck_mtx_unlock(&req
->r_mtx
);
4333 lck_mtx_lock(nfs_request_mutex
);
4336 if (nfs_mount_gone(nmp
)) {
4337 lck_mtx_unlock(nfs_request_mutex
);
4342 if (!req
->r_start
) {
4343 req
->r_start
= now
.tv_sec
;
4344 req
->r_lastmsg
= now
.tv_sec
-
4345 ((nmp
->nm_tprintf_delay
) - (nmp
->nm_tprintf_initial_delay
));
4348 OSAddAtomic64(1, &nfsstats
.rpcrequests
);
4351 * Make sure the request is not in the queue.
4353 if (req
->r_lflags
& RL_QUEUED
) {
4355 panic("nfs_request_send: req %p is already in global requests queue", req
);
4357 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
4358 req
->r_lflags
&= ~RL_QUEUED
;
4359 #endif /* DEVELOPMENT */
4363 * Chain request into list of outstanding requests. Be sure
4364 * to put it LAST so timer finds oldest requests first.
4365 * Make sure that the request queue timer is running
4366 * to check for possible request timeout.
4368 TAILQ_INSERT_TAIL(&nfs_reqq
, req
, r_chain
);
4369 req
->r_lflags
|= RL_QUEUED
;
4370 if (!nfs_request_timer_on
) {
4371 nfs_request_timer_on
= 1;
4372 nfs_interval_timer_start(nfs_request_timer_call
,
4375 lck_mtx_unlock(nfs_request_mutex
);
4377 /* Send the request... */
4378 return nfs_send(req
, wait
);
4382 * Call nfs_wait_reply() to wait for the reply.
4385 nfs_request_wait(struct nfsreq
*req
)
4387 req
->r_error
= nfs_wait_reply(req
);
4391 * Finish up an NFS request by dequeueing it and
4392 * doing the initial NFS request reply processing.
4397 struct nfsm_chain
*nmrepp
,
4400 struct nfsmount
*nmp
;
4403 uint32_t verf_len
= 0;
4404 uint32_t reply_status
= 0;
4405 uint32_t rejected_status
= 0;
4406 uint32_t auth_status
= 0;
4407 uint32_t accepted_status
= 0;
4408 struct nfsm_chain nmrep
;
4409 int error
, clearjbtimeo
;
4411 error
= req
->r_error
;
4414 nmrepp
->nmc_mhead
= NULL
;
4417 /* RPC done, unlink the request. */
4418 nfs_reqdequeue(req
);
4420 mrep
= req
->r_nmrep
.nmc_mhead
;
4424 if ((req
->r_flags
& R_CWND
) && nmp
) {
4426 * Decrement the outstanding request count.
4428 req
->r_flags
&= ~R_CWND
;
4429 lck_mtx_lock(&nmp
->nm_lock
);
4430 FSDBG(273, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
4431 nmp
->nm_sent
-= NFS_CWNDSCALE
;
4432 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
4433 /* congestion window is open, poke the cwnd queue */
4434 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
4435 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
4436 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
4439 lck_mtx_unlock(&nmp
->nm_lock
);
4443 if (nfs_request_using_gss(req
)) {
4445 * If the request used an RPCSEC_GSS credential
4446 * then reset its sequence number bit in the
4449 nfs_gss_clnt_rpcdone(req
);
4452 * If we need to re-send, go back and re-build the
4453 * request based on a new sequence number.
4454 * Note that we're using the original XID.
4456 if (error
== EAGAIN
) {
4461 error
= nfs_gss_clnt_args_restore(req
); // remove any trailer mbufs
4462 req
->r_nmrep
.nmc_mhead
= NULL
;
4463 req
->r_flags
|= R_RESTART
;
4464 if (error
== ENEEDAUTH
) {
4465 req
->r_xid
= 0; // get a new XID
4471 #endif /* CONFIG_NFS_GSS */
4474 * If there was a successful reply, make sure to mark the mount as up.
4475 * If a tprintf message was given (or if this is a timed-out soft mount)
4476 * then post a tprintf message indicating the server is alive again.
4479 if ((req
->r_flags
& R_TPRINTFMSG
) ||
4480 (nmp
&& (NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) &&
4481 ((nmp
->nm_state
& (NFSSTA_TIMEO
| NFSSTA_FORCE
| NFSSTA_DEAD
)) == NFSSTA_TIMEO
))) {
4482 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, "is alive again");
4484 nfs_up(nmp
, req
->r_thread
, NFSSTA_TIMEO
, NULL
);
4487 if (!error
&& !nmp
) {
4493 * break down the RPC header and check if ok
4495 nmrep
= req
->r_nmrep
;
4496 nfsm_chain_get_32(error
, &nmrep
, reply_status
);
4498 if (reply_status
== RPC_MSGDENIED
) {
4499 nfsm_chain_get_32(error
, &nmrep
, rejected_status
);
4501 if (rejected_status
== RPC_MISMATCH
) {
4505 nfsm_chain_get_32(error
, &nmrep
, auth_status
);
4507 switch (auth_status
) {
4509 case RPCSEC_GSS_CREDPROBLEM
:
4510 case RPCSEC_GSS_CTXPROBLEM
:
4512 * An RPCSEC_GSS cred or context problem.
4513 * We can't use it anymore.
4514 * Restore the args, renew the context
4515 * and set up for a resend.
4517 error
= nfs_gss_clnt_args_restore(req
);
4518 if (error
&& error
!= ENEEDAUTH
) {
4523 error
= nfs_gss_clnt_ctx_renew(req
);
4529 req
->r_nmrep
.nmc_mhead
= NULL
;
4530 req
->r_xid
= 0; // get a new XID
4531 req
->r_flags
|= R_RESTART
;
4533 #endif /* CONFIG_NFS_GSS */
4541 /* Now check the verifier */
4542 nfsm_chain_get_32(error
, &nmrep
, verf_type
); // verifier flavor
4543 nfsm_chain_get_32(error
, &nmrep
, verf_len
); // verifier length
4546 switch (req
->r_auth
) {
4549 /* Any AUTH_SYS verifier is ignored */
4551 nfsm_chain_adv(error
, &nmrep
, nfsm_rndup(verf_len
));
4553 nfsm_chain_get_32(error
, &nmrep
, accepted_status
);
4559 error
= nfs_gss_clnt_verf_get(req
, &nmrep
,
4560 verf_type
, verf_len
, &accepted_status
);
4562 #endif /* CONFIG_NFS_GSS */
4566 switch (accepted_status
) {
4568 if (req
->r_procnum
== NFSPROC_NULL
) {
4570 * The NFS null procedure is unique,
4571 * in not returning an NFS status.
4575 nfsm_chain_get_32(error
, &nmrep
, *status
);
4579 if ((nmp
->nm_vers
!= NFS_VER2
) && (*status
== NFSERR_TRYLATER
)) {
4581 * It's a JUKEBOX error - delay and try again
4583 int delay
, slpflag
= (NMFLAG(nmp
, INTR
) && !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
4586 req
->r_nmrep
.nmc_mhead
= NULL
;
4587 if ((req
->r_delay
>= 30) && !(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
4588 /* we're not yet completely mounted and */
4589 /* we can't complete an RPC, so we fail */
4590 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4592 error
= req
->r_error
;
4595 req
->r_delay
= !req
->r_delay
? NFS_TRYLATERDEL
: (req
->r_delay
* 2);
4596 if (req
->r_delay
> 30) {
4599 if (nmp
->nm_tprintf_initial_delay
&& (req
->r_delay
>= nmp
->nm_tprintf_initial_delay
)) {
4600 if (!(req
->r_flags
& R_JBTPRINTFMSG
)) {
4601 req
->r_flags
|= R_JBTPRINTFMSG
;
4602 lck_mtx_lock(&nmp
->nm_lock
);
4604 lck_mtx_unlock(&nmp
->nm_lock
);
4606 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_JUKEBOXTIMEO
,
4607 "resource temporarily unavailable (jukebox)", 0);
4609 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && (req
->r_delay
== 30) &&
4610 !(req
->r_flags
& R_NOINTR
)) {
4611 /* for soft mounts, just give up after a short while */
4612 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
4614 error
= req
->r_error
;
4617 delay
= req
->r_delay
;
4618 if (req
->r_callback
.rcb_func
) {
4621 req
->r_resendtime
= now
.tv_sec
+ delay
;
4624 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
4627 tsleep(nfs_request_finish
, PSOCK
| slpflag
, "nfs_jukebox_trylater", hz
);
4629 } while (--delay
> 0);
4631 req
->r_xid
= 0; // get a new XID
4632 req
->r_flags
|= R_RESTART
;
4634 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_TRYLATER
);
4638 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4639 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4640 lck_mtx_lock(&nmp
->nm_lock
);
4642 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4643 lck_mtx_unlock(&nmp
->nm_lock
);
4644 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, "resource available again");
4648 if ((nmp
->nm_vers
>= NFS_VER4
) && (*status
== NFSERR_WRONGSEC
)) {
4650 * Hmmm... we need to try a different security flavor.
4651 * The first time a request hits this, we will allocate an array
4652 * to track flavors to try. We fill the array with the mount's
4653 * preferred flavors or the server's preferred flavors or just the
4654 * flavors we support.
4656 uint32_t srvflavors
[NX_MAX_SEC_FLAVORS
];
4659 /* Call SECINFO to try to get list of flavors from server. */
4660 srvcount
= NX_MAX_SEC_FLAVORS
;
4661 nfs4_secinfo_rpc(nmp
, &req
->r_secinfo
, req
->r_cred
, srvflavors
, &srvcount
);
4663 if (!req
->r_wrongsec
) {
4664 /* first time... set up flavor array */
4665 MALLOC(req
->r_wrongsec
, uint32_t*, NX_MAX_SEC_FLAVORS
* sizeof(uint32_t), M_TEMP
, M_WAITOK
);
4666 if (!req
->r_wrongsec
) {
4671 if (nmp
->nm_sec
.count
) { /* use the mount's preferred list of flavors */
4672 for (; i
< nmp
->nm_sec
.count
; i
++) {
4673 req
->r_wrongsec
[i
] = nmp
->nm_sec
.flavors
[i
];
4675 } else if (srvcount
) { /* otherwise use the server's list of flavors */
4676 for (; i
< srvcount
; i
++) {
4677 req
->r_wrongsec
[i
] = srvflavors
[i
];
4679 } else { /* otherwise, just try the flavors we support. */
4680 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5P
;
4681 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5I
;
4682 req
->r_wrongsec
[i
++] = RPCAUTH_KRB5
;
4683 req
->r_wrongsec
[i
++] = RPCAUTH_SYS
;
4684 req
->r_wrongsec
[i
++] = RPCAUTH_NONE
;
4686 for (; i
< NX_MAX_SEC_FLAVORS
; i
++) { /* invalidate any remaining slots */
4687 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4691 /* clear the current flavor from the list */
4692 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4693 if (req
->r_wrongsec
[i
] == req
->r_auth
) {
4694 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4698 /* find the next flavor to try */
4699 for (i
= 0; i
< NX_MAX_SEC_FLAVORS
; i
++) {
4700 if (req
->r_wrongsec
[i
] != RPCAUTH_INVALID
) {
4701 if (!srvcount
) { /* no server list, just try it */
4704 /* check that it's in the server's list */
4705 for (j
= 0; j
< srvcount
; j
++) {
4706 if (req
->r_wrongsec
[i
] == srvflavors
[j
]) {
4710 if (j
< srvcount
) { /* found */
4713 /* not found in server list */
4714 req
->r_wrongsec
[i
] = RPCAUTH_INVALID
;
4717 if (i
== NX_MAX_SEC_FLAVORS
) {
4718 /* nothing left to try! */
4723 /* retry with the next auth flavor */
4724 req
->r_auth
= req
->r_wrongsec
[i
];
4725 req
->r_xid
= 0; // get a new XID
4726 req
->r_flags
|= R_RESTART
;
4728 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
, NFSERR_WRONGSEC
);
4731 if ((nmp
->nm_vers
>= NFS_VER4
) && req
->r_wrongsec
) {
4733 * We renegotiated security for this request; so update the
4734 * default security flavor for the associated node.
4737 req
->r_np
->n_auth
= req
->r_auth
;
4740 #endif /* CONFIG_NFS4 */
4741 if (*status
== NFS_OK
) {
4743 * Successful NFS request
4746 req
->r_nmrep
.nmc_mhead
= NULL
;
4749 /* Got an NFS error of some kind */
4752 * If the File Handle was stale, invalidate the
4753 * lookup cache, just in case.
4755 if ((*status
== ESTALE
) && req
->r_np
) {
4756 cache_purge(NFSTOV(req
->r_np
));
4757 /* if monitored, also send delete event */
4758 if (vnode_ismonitored(NFSTOV(req
->r_np
))) {
4759 nfs_vnode_notify(req
->r_np
, (VNODE_EVENT_ATTRIB
| VNODE_EVENT_DELETE
));
4762 if (nmp
->nm_vers
== NFS_VER2
) {
4767 req
->r_nmrep
.nmc_mhead
= NULL
;
4770 case RPC_PROGUNAVAIL
:
4771 error
= EPROGUNAVAIL
;
4773 case RPC_PROGMISMATCH
:
4774 error
= ERPCMISMATCH
;
4776 case RPC_PROCUNAVAIL
:
4777 error
= EPROCUNAVAIL
;
4782 case RPC_SYSTEM_ERR
:
4788 if (req
->r_flags
& R_JBTPRINTFMSG
) {
4789 req
->r_flags
&= ~R_JBTPRINTFMSG
;
4790 lck_mtx_lock(&nmp
->nm_lock
);
4792 clearjbtimeo
= (nmp
->nm_jbreqs
== 0) ? NFSSTA_JUKEBOXTIMEO
: 0;
4793 lck_mtx_unlock(&nmp
->nm_lock
);
4795 nfs_up(nmp
, req
->r_thread
, clearjbtimeo
, NULL
);
4798 FSDBG(273, R_XID32(req
->r_xid
), nmp
, req
,
4799 (!error
&& (*status
== NFS_OK
)) ? 0xf0f0f0f0 : error
);
4804 * NFS request using a GSS/Kerberos security flavor?
4807 nfs_request_using_gss(struct nfsreq
*req
)
4809 if (!req
->r_gss_ctx
) {
4812 switch (req
->r_auth
) {
4822 * Perform an NFS request synchronously.
4828 mount_t mp
, /* used only if !np */
4829 struct nfsm_chain
*nmrest
,
4832 struct nfsreq_secinfo_args
*si
,
4833 struct nfsm_chain
*nmrepp
,
4837 return nfs_request2(np
, mp
, nmrest
, procnum
,
4838 vfs_context_thread(ctx
), vfs_context_ucred(ctx
),
4839 si
, 0, nmrepp
, xidp
, status
);
4845 mount_t mp
, /* used only if !np */
4846 struct nfsm_chain
*nmrest
,
4850 struct nfsreq_secinfo_args
*si
,
4852 struct nfsm_chain
*nmrepp
,
4859 req
= zalloc_flags(nfs_req_zone
, Z_WAITOK
);
4860 if ((error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, &req
))) {
4863 req
->r_flags
|= (flags
& (R_OPTMASK
| R_SOFT
));
4865 req
->r_secinfo
= *si
;
4868 FSDBG_TOP(273, R_XID32(req
->r_xid
), np
, procnum
, 0);
4871 req
->r_flags
&= ~R_RESTART
;
4872 if ((error
= nfs_request_add_header(req
))) {
4878 if ((error
= nfs_request_send(req
, 1))) {
4881 nfs_request_wait(req
);
4882 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4885 } while (req
->r_flags
& R_RESTART
);
4887 FSDBG_BOT(273, R_XID32(req
->r_xid
), np
, procnum
, error
);
4888 nfs_request_rele(req
);
4890 NFS_ZFREE(nfs_req_zone
, req
);
4897 * Set up a new null proc request to exchange GSS context tokens with the
4898 * server. Associate the context that we are setting up with the request that we
4905 struct nfsm_chain
*nmrest
,
4909 struct nfs_gss_clnt_ctx
*cp
, /* Set to gss context to renew or setup */
4910 struct nfsm_chain
*nmrepp
,
4914 int error
, wait
= 1;
4916 req
= zalloc_flags(nfs_req_zone
, Z_WAITOK
);
4917 if ((error
= nfs_request_create(NULL
, mp
, nmrest
, NFSPROC_NULL
, thd
, cred
, &req
))) {
4920 req
->r_flags
|= (flags
& R_OPTMASK
);
4923 printf("nfs_request_gss request has no context\n");
4924 nfs_request_rele(req
);
4925 error
= NFSERR_EAUTH
;
4928 nfs_gss_clnt_ctx_ref(req
, cp
);
4931 * Don't wait for a reply to a context destroy advisory
4932 * to avoid hanging on a dead server.
4934 if (cp
->gss_clnt_proc
== RPCSEC_GSS_DESTROY
) {
4938 FSDBG_TOP(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, 0);
4941 req
->r_flags
&= ~R_RESTART
;
4942 if ((error
= nfs_request_add_header(req
))) {
4946 if ((error
= nfs_request_send(req
, wait
))) {
4953 nfs_request_wait(req
);
4954 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
4957 } while (req
->r_flags
& R_RESTART
);
4959 FSDBG_BOT(273, R_XID32(req
->r_xid
), NULL
, NFSPROC_NULL
, error
);
4961 nfs_gss_clnt_ctx_unref(req
);
4962 nfs_request_rele(req
);
4964 NFS_ZFREE(nfs_req_zone
, req
);
4967 #endif /* CONFIG_NFS_GSS */
4970 * Create and start an asynchronous NFS request.
4975 mount_t mp
, /* used only if !np */
4976 struct nfsm_chain
*nmrest
,
4980 struct nfsreq_secinfo_args
*si
,
4982 struct nfsreq_cbinfo
*cb
,
4983 struct nfsreq
**reqp
)
4986 struct nfsmount
*nmp
;
4989 error
= nfs_request_create(np
, mp
, nmrest
, procnum
, thd
, cred
, reqp
);
4991 FSDBG(274, (req
? R_XID32(req
->r_xid
) : 0), np
, procnum
, error
);
4995 req
->r_flags
|= (flags
& R_OPTMASK
);
4996 req
->r_flags
|= R_ASYNC
;
4998 req
->r_secinfo
= *si
;
5001 req
->r_callback
= *cb
;
5003 error
= nfs_request_add_header(req
);
5005 req
->r_flags
|= R_WAITSENT
;
5006 if (req
->r_callback
.rcb_func
) {
5007 nfs_request_ref(req
, 0);
5009 error
= nfs_request_send(req
, 1);
5010 lck_mtx_lock(&req
->r_mtx
);
5011 if (!error
&& !(req
->r_flags
& R_SENT
) && req
->r_callback
.rcb_func
) {
5012 /* make sure to wait until this async I/O request gets sent */
5013 int slpflag
= (req
->r_nmp
&& NMFLAG(req
->r_nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) ? PCATCH
: 0;
5014 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
5015 while (!(req
->r_flags
& R_SENT
)) {
5017 if ((req
->r_flags
& R_RESENDQ
) && !nfs_mount_gone(nmp
)) {
5018 lck_mtx_lock(&nmp
->nm_lock
);
5019 if ((req
->r_flags
& R_RESENDQ
) && (nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
5021 * It's not going to get off the resend queue if we're in recovery.
5022 * So, just take it off ourselves. We could be holding mount state
5023 * busy and thus holding up the start of recovery.
5025 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
5026 req
->r_flags
&= ~R_RESENDQ
;
5027 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
5028 lck_mtx_unlock(&nmp
->nm_lock
);
5029 req
->r_flags
|= R_SENDING
;
5030 lck_mtx_unlock(&req
->r_mtx
);
5031 error
= nfs_send(req
, 1);
5032 /* Remove the R_RESENDQ reference */
5033 nfs_request_rele(req
);
5034 lck_mtx_lock(&req
->r_mtx
);
5040 lck_mtx_unlock(&nmp
->nm_lock
);
5042 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
5045 msleep(req
, &req
->r_mtx
, slpflag
| (PZERO
- 1), "nfswaitsent", &ts
);
5049 sent
= req
->r_flags
& R_SENT
;
5050 lck_mtx_unlock(&req
->r_mtx
);
5051 if (error
&& req
->r_callback
.rcb_func
&& !sent
) {
5052 nfs_request_rele(req
);
5055 FSDBG(274, R_XID32(req
->r_xid
), np
, procnum
, error
);
5056 if (error
|| req
->r_callback
.rcb_func
) {
5057 nfs_request_rele(req
);
5064 * Wait for and finish an asynchronous NFS request.
5067 nfs_request_async_finish(
5069 struct nfsm_chain
*nmrepp
,
5073 int error
= 0, asyncio
= req
->r_callback
.rcb_func
? 1 : 0;
5074 struct nfsmount
*nmp
;
5076 lck_mtx_lock(&req
->r_mtx
);
5078 req
->r_flags
|= R_ASYNCWAIT
;
5080 while (req
->r_flags
& R_RESENDQ
) { /* wait until the request is off the resend queue */
5081 struct timespec ts
= { .tv_sec
= 2, .tv_nsec
= 0 };
5083 if ((nmp
= req
->r_nmp
)) {
5084 lck_mtx_lock(&nmp
->nm_lock
);
5085 if ((req
->r_flags
& R_RESENDQ
) && (nmp
->nm_state
& NFSSTA_RECOVER
) && (req
->r_rchain
.tqe_next
!= NFSREQNOLIST
)) {
5087 * It's not going to get off the resend queue if we're in recovery.
5088 * So, just take it off ourselves. We could be holding mount state
5089 * busy and thus holding up the start of recovery.
5091 TAILQ_REMOVE(&nmp
->nm_resendq
, req
, r_rchain
);
5092 req
->r_flags
&= ~R_RESENDQ
;
5093 req
->r_rchain
.tqe_next
= NFSREQNOLIST
;
5094 /* Remove the R_RESENDQ reference */
5095 assert(req
->r_refs
> 0);
5097 lck_mtx_unlock(&nmp
->nm_lock
);
5100 lck_mtx_unlock(&nmp
->nm_lock
);
5102 if ((error
= nfs_sigintr(req
->r_nmp
, req
, req
->r_thread
, 0))) {
5105 msleep(req
, &req
->r_mtx
, PZERO
- 1, "nfsresendqwait", &ts
);
5107 lck_mtx_unlock(&req
->r_mtx
);
5110 nfs_request_wait(req
);
5111 error
= nfs_request_finish(req
, nmrepp
, status
);
5114 while (!error
&& (req
->r_flags
& R_RESTART
)) {
5116 assert(req
->r_achain
.tqe_next
== NFSREQNOLIST
);
5117 lck_mtx_lock(&req
->r_mtx
);
5118 req
->r_flags
&= ~R_IOD
;
5119 if (req
->r_resendtime
) { /* send later */
5120 nfs_asyncio_resend(req
);
5121 lck_mtx_unlock(&req
->r_mtx
);
5124 lck_mtx_unlock(&req
->r_mtx
);
5127 req
->r_flags
&= ~R_RESTART
;
5128 if ((error
= nfs_request_add_header(req
))) {
5131 if ((error
= nfs_request_send(req
, !asyncio
))) {
5137 nfs_request_wait(req
);
5138 if ((error
= nfs_request_finish(req
, nmrepp
, status
))) {
5146 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, error
);
5147 nfs_request_rele(req
);
5152 * Cancel a pending asynchronous NFS request.
5155 nfs_request_async_cancel(struct nfsreq
*req
)
5157 FSDBG(275, R_XID32(req
->r_xid
), req
->r_np
, req
->r_procnum
, 0xD1ED1E);
5158 nfs_request_rele(req
);
5162 * Flag a request as being terminated.
5165 nfs_softterm(struct nfsreq
*req
)
5167 struct nfsmount
*nmp
= req
->r_nmp
;
5168 req
->r_flags
|= R_SOFTTERM
;
5169 req
->r_error
= ETIMEDOUT
;
5170 if (!(req
->r_flags
& R_CWND
) || nfs_mount_gone(nmp
)) {
5173 /* update congestion window */
5174 req
->r_flags
&= ~R_CWND
;
5175 lck_mtx_lock(&nmp
->nm_lock
);
5176 FSDBG(532, R_XID32(req
->r_xid
), req
, nmp
->nm_sent
, nmp
->nm_cwnd
);
5177 nmp
->nm_sent
-= NFS_CWNDSCALE
;
5178 if ((nmp
->nm_sent
< nmp
->nm_cwnd
) && !TAILQ_EMPTY(&nmp
->nm_cwndq
)) {
5179 /* congestion window is open, poke the cwnd queue */
5180 struct nfsreq
*req2
= TAILQ_FIRST(&nmp
->nm_cwndq
);
5181 TAILQ_REMOVE(&nmp
->nm_cwndq
, req2
, r_cchain
);
5182 req2
->r_cchain
.tqe_next
= NFSREQNOLIST
;
5185 lck_mtx_unlock(&nmp
->nm_lock
);
5189 * Ensure req isn't in use by the timer, then dequeue it.
5192 nfs_reqdequeue(struct nfsreq
*req
)
5194 lck_mtx_lock(nfs_request_mutex
);
5195 while (req
->r_lflags
& RL_BUSY
) {
5196 req
->r_lflags
|= RL_WAITING
;
5197 msleep(&req
->r_lflags
, nfs_request_mutex
, PSOCK
, "reqdeq", NULL
);
5199 if (req
->r_lflags
& RL_QUEUED
) {
5200 TAILQ_REMOVE(&nfs_reqq
, req
, r_chain
);
5201 req
->r_lflags
&= ~RL_QUEUED
;
5203 lck_mtx_unlock(nfs_request_mutex
);
5207 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5208 * free()'d out from under it.
5211 nfs_reqbusy(struct nfsreq
*req
)
5213 if (req
->r_lflags
& RL_BUSY
) {
5214 panic("req locked");
5216 req
->r_lflags
|= RL_BUSY
;
5220 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5223 nfs_reqnext(struct nfsreq
*req
)
5225 struct nfsreq
* nextreq
;
5231 * We need to get and busy the next req before signalling the
5232 * current one, otherwise wakeup() may block us and we'll race to
5233 * grab the next req.
5235 nextreq
= TAILQ_NEXT(req
, r_chain
);
5236 if (nextreq
!= NULL
) {
5237 nfs_reqbusy(nextreq
);
5239 /* unbusy and signal. */
5240 req
->r_lflags
&= ~RL_BUSY
;
5241 if (req
->r_lflags
& RL_WAITING
) {
5242 req
->r_lflags
&= ~RL_WAITING
;
5243 wakeup(&req
->r_lflags
);
5249 * NFS request queue timer routine
5251 * Scan the NFS request queue for any requests that have timed out.
5253 * Alert the system of unresponsive servers.
5254 * Mark expired requests on soft mounts as terminated.
5255 * For UDP, mark/signal requests for retransmission.
5258 nfs_request_timer(__unused
void *param0
, __unused
void *param1
)
5261 struct nfsmount
*nmp
;
5262 int timeo
, maxtime
, finish_asyncio
, error
;
5264 TAILQ_HEAD(nfs_mount_pokeq
, nfsmount
) nfs_mount_poke_queue
;
5265 TAILQ_INIT(&nfs_mount_poke_queue
);
5268 lck_mtx_lock(nfs_request_mutex
);
5269 req
= TAILQ_FIRST(&nfs_reqq
);
5270 if (req
== NULL
) { /* no requests - turn timer off */
5271 nfs_request_timer_on
= 0;
5272 lck_mtx_unlock(nfs_request_mutex
);
5279 for (; req
!= NULL
; req
= nfs_reqnext(req
)) {
5282 NFS_SOCK_DBG("Found a request with out a mount!\n");
5285 if (req
->r_error
|| req
->r_nmrep
.nmc_mhead
) {
5288 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 0))) {
5289 if (req
->r_callback
.rcb_func
!= NULL
) {
5290 /* async I/O RPC needs to be finished */
5291 lck_mtx_lock(&req
->r_mtx
);
5292 req
->r_error
= error
;
5293 finish_asyncio
= !(req
->r_flags
& R_WAITSENT
);
5295 lck_mtx_unlock(&req
->r_mtx
);
5296 if (finish_asyncio
) {
5297 nfs_asyncio_finish(req
);
5303 lck_mtx_lock(&req
->r_mtx
);
5305 if (nmp
->nm_tprintf_initial_delay
&&
5306 ((req
->r_rexmit
> 2) || (req
->r_flags
& R_RESENDERR
)) &&
5307 ((req
->r_lastmsg
+ nmp
->nm_tprintf_delay
) < now
.tv_sec
)) {
5308 req
->r_lastmsg
= now
.tv_sec
;
5309 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5310 "not responding", 1);
5311 req
->r_flags
|= R_TPRINTFMSG
;
5312 lck_mtx_lock(&nmp
->nm_lock
);
5313 if (!(nmp
->nm_state
& NFSSTA_MOUNTED
)) {
5314 lck_mtx_unlock(&nmp
->nm_lock
);
5315 /* we're not yet completely mounted and */
5316 /* we can't complete an RPC, so we fail */
5317 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5319 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5321 lck_mtx_unlock(&req
->r_mtx
);
5322 if (finish_asyncio
) {
5323 nfs_asyncio_finish(req
);
5327 lck_mtx_unlock(&nmp
->nm_lock
);
5331 * Put a reasonable limit on the maximum timeout,
5332 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5334 if (!(NMFLAG(nmp
, SOFT
) || (req
->r_flags
& R_SOFT
)) && !nfs_can_squish(nmp
)) {
5335 maxtime
= NFS_MAXTIMEO
;
5336 } else if ((req
->r_flags
& (R_SETUP
| R_RECOVER
)) ||
5337 ((nmp
->nm_reconnect_start
<= 0) || ((now
.tv_sec
- nmp
->nm_reconnect_start
) < 8))) {
5338 maxtime
= (NFS_MAXTIMEO
/ (nmp
->nm_timeouts
+ 1)) / 2;
5340 maxtime
= NFS_MINTIMEO
/ 4;
5344 * Check for request timeout.
5346 if (req
->r_rtt
>= 0) {
5348 lck_mtx_lock(&nmp
->nm_lock
);
5349 if (req
->r_flags
& R_RESENDERR
) {
5350 /* with resend errors, retry every few seconds */
5353 if (req
->r_procnum
== NFSPROC_NULL
&& req
->r_gss_ctx
!= NULL
) {
5354 timeo
= NFS_MINIDEMTIMEO
; // gss context setup
5355 } else if (NMFLAG(nmp
, DUMBTIMER
)) {
5356 timeo
= nmp
->nm_timeo
;
5358 timeo
= NFS_RTO(nmp
, proct
[req
->r_procnum
]);
5361 /* ensure 62.5 ms floor */
5362 while (16 * timeo
< hz
) {
5365 if (nmp
->nm_timeouts
> 0) {
5366 timeo
*= nfs_backoff
[nmp
->nm_timeouts
- 1];
5369 /* limit timeout to max */
5370 if (timeo
> maxtime
) {
5373 if (req
->r_rtt
<= timeo
) {
5374 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req
->r_rtt
, timeo
);
5375 lck_mtx_unlock(&nmp
->nm_lock
);
5376 lck_mtx_unlock(&req
->r_mtx
);
5379 /* The request has timed out */
5380 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5381 req
->r_procnum
, proct
[req
->r_procnum
],
5382 req
->r_xid
, req
->r_rtt
, timeo
, nmp
->nm_timeouts
,
5383 (now
.tv_sec
- req
->r_start
) * NFS_HZ
, maxtime
);
5384 if (nmp
->nm_timeouts
< 8) {
5387 if (nfs_mount_check_dead_timeout(nmp
)) {
5388 /* Unbusy this request */
5389 req
->r_lflags
&= ~RL_BUSY
;
5390 if (req
->r_lflags
& RL_WAITING
) {
5391 req
->r_lflags
&= ~RL_WAITING
;
5392 wakeup(&req
->r_lflags
);
5394 lck_mtx_unlock(&req
->r_mtx
);
5396 /* No need to poke this mount */
5397 if (nmp
->nm_sockflags
& NMSOCK_POKE
) {
5398 nmp
->nm_sockflags
&= ~NMSOCK_POKE
;
5399 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5401 /* Release our lock state, so we can become a zombie */
5402 lck_mtx_unlock(nfs_request_mutex
);
5405 * Note nfs_mount_make zombie(nmp) must be
5406 * called with nm_lock held. After doing some
5407 * work we release nm_lock in
5408 * nfs_make_mount_zombie with out acquiring any
5409 * other locks. (Later, in nfs_mount_zombie we
5410 * will acquire nfs_request_mutex, r_mtx,
5411 * nm_lock in that order). So we should not be
5412 * introducing deadlock here. We take a reference
5413 * on the mount so that its still there when we
5417 nfs_mount_make_zombie(nmp
);
5418 lck_mtx_unlock(&nmp
->nm_lock
);
5419 nfs_mount_rele(nmp
);
5422 * All the request for this mount have now been
5423 * removed from the request queue. Restart to
5424 * process the remaining mounts
5429 /* if it's been a few seconds, try poking the socket */
5430 if ((nmp
->nm_sotype
== SOCK_STREAM
) &&
5431 ((now
.tv_sec
- req
->r_start
) >= 3) &&
5432 !(nmp
->nm_sockflags
& (NMSOCK_POKE
| NMSOCK_UNMOUNT
)) &&
5433 (nmp
->nm_sockflags
& NMSOCK_READY
)) {
5434 nmp
->nm_sockflags
|= NMSOCK_POKE
;
5436 * We take a ref on the mount so that we know the mount will still be there
5437 * when we process the nfs_mount_poke_queue. An unmount request will block
5438 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5439 * the reference after calling nfs_sock_poke below;
5442 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5444 lck_mtx_unlock(&nmp
->nm_lock
);
5447 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5448 if ((NMFLAG(nmp
, SOFT
) || (req
->r_flags
& (R_SETUP
| R_RECOVER
| R_SOFT
))) &&
5449 ((req
->r_rexmit
>= req
->r_retry
) || /* too many */
5450 ((now
.tv_sec
- req
->r_start
) * NFS_HZ
> maxtime
))) { /* too long */
5451 OSAddAtomic64(1, &nfsstats
.rpctimeouts
);
5452 lck_mtx_lock(&nmp
->nm_lock
);
5453 if (!(nmp
->nm_state
& NFSSTA_TIMEO
)) {
5454 lck_mtx_unlock(&nmp
->nm_lock
);
5455 /* make sure we note the unresponsive server */
5456 /* (maxtime may be less than tprintf delay) */
5457 nfs_down(req
->r_nmp
, req
->r_thread
, 0, NFSSTA_TIMEO
,
5458 "not responding", 1);
5459 req
->r_lastmsg
= now
.tv_sec
;
5460 req
->r_flags
|= R_TPRINTFMSG
;
5462 lck_mtx_unlock(&nmp
->nm_lock
);
5464 if (req
->r_flags
& R_NOINTR
) {
5465 /* don't terminate nointr requests on timeout */
5466 lck_mtx_unlock(&req
->r_mtx
);
5469 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5470 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
,
5471 now
.tv_sec
- req
->r_start
);
5473 finish_asyncio
= ((req
->r_callback
.rcb_func
!= NULL
) && !(req
->r_flags
& R_WAITSENT
));
5475 lck_mtx_unlock(&req
->r_mtx
);
5476 if (finish_asyncio
) {
5477 nfs_asyncio_finish(req
);
5482 /* for TCP, only resend if explicitly requested */
5483 if ((nmp
->nm_sotype
== SOCK_STREAM
) && !(req
->r_flags
& R_MUSTRESEND
)) {
5484 if (++req
->r_rexmit
> NFS_MAXREXMIT
) {
5485 req
->r_rexmit
= NFS_MAXREXMIT
;
5488 lck_mtx_unlock(&req
->r_mtx
);
5493 * The request needs to be (re)sent. Kick the requester to resend it.
5494 * (unless it's already marked as needing a resend)
5496 if ((req
->r_flags
& R_MUSTRESEND
) && (req
->r_rtt
== -1)) {
5497 lck_mtx_unlock(&req
->r_mtx
);
5500 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5501 req
->r_procnum
, req
->r_xid
, req
->r_flags
, req
->r_rtt
);
5502 req
->r_flags
|= R_MUSTRESEND
;
5505 if ((req
->r_flags
& (R_IOD
| R_ASYNC
| R_ASYNCWAIT
| R_SENDING
)) == R_ASYNC
) {
5506 nfs_asyncio_resend(req
);
5508 lck_mtx_unlock(&req
->r_mtx
);
5511 lck_mtx_unlock(nfs_request_mutex
);
5513 /* poke any sockets */
5514 while ((nmp
= TAILQ_FIRST(&nfs_mount_poke_queue
))) {
5515 TAILQ_REMOVE(&nfs_mount_poke_queue
, nmp
, nm_pokeq
);
5517 nfs_mount_rele(nmp
);
5520 nfs_interval_timer_start(nfs_request_timer_call
, NFS_REQUESTDELAY
);
5524 * check a thread's proc for the "noremotehang" flag.
5527 nfs_noremotehang(thread_t thd
)
5529 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
5530 return p
&& proc_noremotehang(p
);
5534 * Test for a termination condition pending on the process.
5535 * This is used to determine if we need to bail on a mount.
5536 * ETIMEDOUT is returned if there has been a soft timeout.
5537 * EINTR is returned if there is a signal pending that is not being ignored
5538 * and the mount is interruptable, or if we are a thread that is in the process
5539 * of cancellation (also SIGKILL posted).
5541 extern int sigprop
[NSIG
+ 1];
5543 nfs_sigintr(struct nfsmount
*nmp
, struct nfsreq
*req
, thread_t thd
, int nmplocked
)
5552 if (req
&& (req
->r_flags
& R_SOFTTERM
)) {
5553 return ETIMEDOUT
; /* request has been terminated. */
5555 if (req
&& (req
->r_flags
& R_NOINTR
)) {
5556 thd
= NULL
; /* don't check for signal on R_NOINTR */
5559 lck_mtx_lock(&nmp
->nm_lock
);
5561 if (nmp
->nm_state
& NFSSTA_FORCE
) {
5562 /* If a force unmount is in progress then fail. */
5564 } else if (vfs_isforce(nmp
->nm_mountp
)) {
5565 /* Someone is unmounting us, go soft and mark it. */
5566 NFS_BITMAP_SET(nmp
->nm_flags
, NFS_MFLAG_SOFT
);
5567 nmp
->nm_state
|= NFSSTA_FORCE
;
5570 /* Check if the mount is marked dead. */
5571 if (!error
&& (nmp
->nm_state
& NFSSTA_DEAD
)) {
5576 * If the mount is hung and we've requested not to hang
5577 * on remote filesystems, then bail now.
5579 if (current_proc() != kernproc
&&
5580 !error
&& (nmp
->nm_state
& NFSSTA_TIMEO
) && nfs_noremotehang(thd
)) {
5585 lck_mtx_unlock(&nmp
->nm_lock
);
5591 /* may not have a thread for async I/O */
5592 if (thd
== NULL
|| current_proc() == kernproc
) {
5597 * Check if the process is aborted, but don't interrupt if we
5598 * were killed by a signal and this is the exiting thread which
5599 * is attempting to dump core.
5601 if (((p
= current_proc()) != kernproc
) && current_thread_aborted() &&
5602 (!(p
->p_acflag
& AXSIG
) || (p
->exit_thread
!= current_thread()) ||
5603 (p
->p_sigacts
== NULL
) ||
5604 (p
->p_sigacts
->ps_sig
< 1) || (p
->p_sigacts
->ps_sig
> NSIG
) ||
5605 !(sigprop
[p
->p_sigacts
->ps_sig
] & SA_CORE
))) {
5609 /* mask off thread and process blocked signals. */
5610 if (NMFLAG(nmp
, INTR
) && ((p
= get_bsdthreadtask_info(thd
))) &&
5611 proc_pendingsignals(p
, NFSINT_SIGMASK
)) {
5618 * Lock a socket against others.
5619 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5620 * and also to avoid race conditions between the processes with nfs requests
5621 * in progress when a reconnect is necessary.
5624 nfs_sndlock(struct nfsreq
*req
)
5626 struct nfsmount
*nmp
= req
->r_nmp
;
5628 int error
= 0, slpflag
= 0;
5629 struct timespec ts
= { .tv_sec
= 0, .tv_nsec
= 0 };
5631 if (nfs_mount_gone(nmp
)) {
5635 lck_mtx_lock(&nmp
->nm_lock
);
5636 statep
= &nmp
->nm_state
;
5638 if (NMFLAG(nmp
, INTR
) && req
->r_thread
&& !(req
->r_flags
& R_NOINTR
)) {
5641 while (*statep
& NFSSTA_SNDLOCK
) {
5642 if ((error
= nfs_sigintr(nmp
, req
, req
->r_thread
, 1))) {
5645 *statep
|= NFSSTA_WANTSND
;
5646 if (nfs_noremotehang(req
->r_thread
)) {
5649 msleep(statep
, &nmp
->nm_lock
, slpflag
| (PZERO
- 1), "nfsndlck", &ts
);
5650 if (slpflag
== PCATCH
) {
5656 *statep
|= NFSSTA_SNDLOCK
;
5658 lck_mtx_unlock(&nmp
->nm_lock
);
5663 * Unlock the stream socket for others.
5666 nfs_sndunlock(struct nfsreq
*req
)
5668 struct nfsmount
*nmp
= req
->r_nmp
;
5669 int *statep
, wake
= 0;
5674 lck_mtx_lock(&nmp
->nm_lock
);
5675 statep
= &nmp
->nm_state
;
5676 if ((*statep
& NFSSTA_SNDLOCK
) == 0) {
5677 panic("nfs sndunlock");
5679 *statep
&= ~(NFSSTA_SNDLOCK
| NFSSTA_SENDING
);
5680 if (*statep
& NFSSTA_WANTSND
) {
5681 *statep
&= ~NFSSTA_WANTSND
;
5684 lck_mtx_unlock(&nmp
->nm_lock
);
5692 struct nfsmount
*nmp
,
5694 struct sockaddr
*saddr
,
5701 struct nfsm_chain
*nmrep
)
5703 int error
= 0, on
= 1, try, sendat
= 2, soproto
, recv
, optlen
, restoreto
= 0;
5704 socket_t newso
= NULL
;
5705 struct sockaddr_storage ss
;
5706 struct timeval orig_rcvto
, orig_sndto
, tv
= { .tv_sec
= 1, .tv_usec
= 0 };
5707 mbuf_t m
, mrep
= NULL
;
5709 uint32_t rxid
= 0, reply
= 0, reply_status
, rejected_status
;
5710 uint32_t verf_type
, verf_len
, accepted_status
;
5711 size_t readlen
, sentlen
;
5712 struct nfs_rpc_record_state nrrs
;
5715 /* create socket and set options */
5716 if (saddr
->sa_family
== AF_LOCAL
) {
5719 soproto
= (sotype
== SOCK_DGRAM
) ? IPPROTO_UDP
: IPPROTO_TCP
;
5721 if ((error
= sock_socket(saddr
->sa_family
, sotype
, soproto
, NULL
, NULL
, &newso
))) {
5725 if (bindresv
&& saddr
->sa_family
!= AF_LOCAL
) {
5726 int level
= (saddr
->sa_family
== AF_INET
) ? IPPROTO_IP
: IPPROTO_IPV6
;
5727 int optname
= (saddr
->sa_family
== AF_INET
) ? IP_PORTRANGE
: IPV6_PORTRANGE
;
5728 int portrange
= IP_PORTRANGE_LOW
;
5729 error
= sock_setsockopt(newso
, level
, optname
, &portrange
, sizeof(portrange
));
5731 ss
.ss_len
= saddr
->sa_len
;
5732 ss
.ss_family
= saddr
->sa_family
;
5733 if (ss
.ss_family
== AF_INET
) {
5734 ((struct sockaddr_in
*)&ss
)->sin_addr
.s_addr
= INADDR_ANY
;
5735 ((struct sockaddr_in
*)&ss
)->sin_port
= htons(0);
5736 } else if (ss
.ss_family
== AF_INET6
) {
5737 ((struct sockaddr_in6
*)&ss
)->sin6_addr
= in6addr_any
;
5738 ((struct sockaddr_in6
*)&ss
)->sin6_port
= htons(0);
5743 error
= sock_bind(newso
, (struct sockaddr
*)&ss
);
5748 if (sotype
== SOCK_STREAM
) {
5749 # define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5752 error
= sock_connect(newso
, saddr
, MSG_DONTWAIT
);
5753 if (error
== EINPROGRESS
) {
5758 while ((error
= sock_connectwait(newso
, &tv
)) == EINPROGRESS
) {
5759 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5760 if (++count
>= NFS_AUX_CONNECTION_TIMEOUT
) {
5767 if (((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
)))) ||
5768 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
)))) ||
5769 ((error
= sock_setsockopt(newso
, SOL_SOCKET
, SO_NOADDRERR
, &on
, sizeof(on
))))) {
5774 /* make sure socket is using a one second timeout in this function */
5775 optlen
= sizeof(orig_rcvto
);
5776 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, &optlen
);
5778 optlen
= sizeof(orig_sndto
);
5779 error
= sock_getsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, &optlen
);
5782 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &tv
, sizeof(tv
));
5783 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &tv
, sizeof(tv
));
5788 if (sotype
== SOCK_STREAM
) {
5789 sendat
= 0; /* we only resend the request for UDP */
5790 nfs_rpc_record_state_init(&nrrs
);
5793 for (try = 0; try < timeo
; try++) {
5794 if ((error
= nfs_sigintr(nmp
, NULL
, !try ? NULL
: thd
, 0))) {
5797 if (!try || (try == sendat
)) {
5798 /* send the request (resending periodically for UDP) */
5799 if ((error
= mbuf_copym(mreq
, 0, MBUF_COPYALL
, MBUF_WAITOK
, &m
))) {
5802 bzero(&msg
, sizeof(msg
));
5803 if ((sotype
== SOCK_DGRAM
) && !sock_isconnected(so
)) {
5804 msg
.msg_name
= saddr
;
5805 msg
.msg_namelen
= saddr
->sa_len
;
5807 if ((error
= sock_sendmbuf(so
, &msg
, m
, 0, &sentlen
))) {
5815 /* wait for the response */
5816 if (sotype
== SOCK_STREAM
) {
5817 /* try to read (more of) record */
5818 error
= nfs_rpc_record_read(so
, &nrrs
, 0, &recv
, &mrep
);
5819 /* if we don't have the whole record yet, we'll keep trying */
5822 bzero(&msg
, sizeof(msg
));
5823 error
= sock_receivembuf(so
, &msg
, &mrep
, 0, &readlen
);
5825 if (error
== EWOULDBLOCK
) {
5829 /* parse the response */
5830 nfsm_chain_dissect_init(error
, nmrep
, mrep
);
5831 nfsm_chain_get_32(error
, nmrep
, rxid
);
5832 nfsm_chain_get_32(error
, nmrep
, reply
);
5834 if ((rxid
!= xid
) || (reply
!= RPC_REPLY
)) {
5837 nfsm_chain_get_32(error
, nmrep
, reply_status
);
5839 if (reply_status
== RPC_MSGDENIED
) {
5840 nfsm_chain_get_32(error
, nmrep
, rejected_status
);
5842 error
= (rejected_status
== RPC_MISMATCH
) ? ERPCMISMATCH
: EACCES
;
5845 nfsm_chain_get_32(error
, nmrep
, verf_type
); /* verifier flavor */
5846 nfsm_chain_get_32(error
, nmrep
, verf_len
); /* verifier length */
5849 nfsm_chain_adv(error
, nmrep
, nfsm_rndup(verf_len
));
5851 nfsm_chain_get_32(error
, nmrep
, accepted_status
);
5853 switch (accepted_status
) {
5857 case RPC_PROGUNAVAIL
:
5858 error
= EPROGUNAVAIL
;
5860 case RPC_PROGMISMATCH
:
5861 error
= EPROGMISMATCH
;
5863 case RPC_PROCUNAVAIL
:
5864 error
= EPROCUNAVAIL
;
5869 case RPC_SYSTEM_ERR
:
5878 sock_setsockopt(so
, SOL_SOCKET
, SO_RCVTIMEO
, &orig_rcvto
, sizeof(tv
));
5879 sock_setsockopt(so
, SOL_SOCKET
, SO_SNDTIMEO
, &orig_sndto
, sizeof(tv
));
5882 sock_shutdown(newso
, SHUT_RDWR
);
5891 struct nfsmount
*nmp
,
5893 struct sockaddr
*sa
,
5900 thread_t thd
= vfs_context_thread(ctx
);
5901 kauth_cred_t cred
= vfs_context_ucred(ctx
);
5902 struct sockaddr_storage ss
;
5903 struct sockaddr
*saddr
= (struct sockaddr
*)&ss
;
5904 static struct sockaddr_un rpcbind_cots
= {
5905 sizeof(struct sockaddr_un
),
5909 static struct sockaddr_un rpcbind_clts
= {
5910 sizeof(struct sockaddr_un
),
5914 struct nfsm_chain nmreq
, nmrep
;
5916 int error
= 0, ip
, pmprog
, pmvers
, pmproc
;
5917 uint32_t ualen
= 0, scopeid
= 0, port32
;
5919 char uaddr
[MAX_IPv6_STR_LEN
+ 16];
5921 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
5922 if (saddr
->sa_family
== AF_INET
) {
5926 pmproc
= PMAPPROC_GETPORT
;
5927 } else if (saddr
->sa_family
== AF_INET6
) {
5931 pmproc
= RPCBPROC_GETVERSADDR
;
5932 } else if (saddr
->sa_family
== AF_LOCAL
) {
5936 pmproc
= RPCBPROC_GETVERSADDR
;
5937 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un
*)sa
)->sun_path
);
5938 saddr
= (struct sockaddr
*)((stype
== SOCK_STREAM
) ? &rpcbind_cots
: &rpcbind_clts
);
5942 nfsm_chain_null(&nmreq
);
5943 nfsm_chain_null(&nmrep
);
5946 /* send portmapper request to get port/uaddr */
5948 ((struct sockaddr_in
*)saddr
)->sin_port
= htons(PMAPPORT
);
5949 } else if (ip
== 6) {
5950 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(PMAPPORT
);
5952 nfsm_chain_build_alloc_init(error
, &nmreq
, 8 * NFSX_UNSIGNED
);
5953 nfsm_chain_add_32(error
, &nmreq
, protocol
);
5954 nfsm_chain_add_32(error
, &nmreq
, vers
);
5956 nfsm_chain_add_32(error
, &nmreq
, stype
== SOCK_STREAM
? IPPROTO_TCP
: IPPROTO_UDP
);
5957 nfsm_chain_add_32(error
, &nmreq
, 0);
5959 if (stype
== SOCK_STREAM
) {
5961 nfsm_chain_add_string(error
, &nmreq
, "tcp6", 4);
5963 nfsm_chain_add_string(error
, &nmreq
, "ticotsord", 9);
5967 nfsm_chain_add_string(error
, &nmreq
, "udp6", 4);
5969 nfsm_chain_add_string(error
, &nmreq
, "ticlts", 6);
5972 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* uaddr */
5973 nfsm_chain_add_string(error
, &nmreq
, "", 0); /* owner */
5975 nfsm_chain_build_done(error
, &nmreq
);
5977 error
= nfsm_rpchead2(nmp
, stype
, pmprog
, pmvers
, pmproc
,
5978 RPCAUTH_SYS
, cred
, NULL
, nmreq
.nmc_mhead
, &xid
, &mreq
);
5980 nmreq
.nmc_mhead
= NULL
;
5982 NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq
);
5983 error
= nfs_aux_request(nmp
, thd
, saddr
, so
,
5984 stype
, mreq
, R_XID32(xid
), 0, timeo
, &nmrep
);
5985 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep
.nmc_mhead
);
5986 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error
, protocol
, vers
,
5987 (saddr
->sa_family
== AF_LOCAL
) ? ((struct sockaddr_un
*)saddr
)->sun_path
:
5988 (saddr
->sa_family
== AF_INET6
) ? "INET6 socket" : "INET socket");
5990 /* grab port from portmap response */
5992 nfsm_chain_get_32(error
, &nmrep
, port32
);
5994 if (NFS_PORT_INVALID(port32
)) {
5997 ((struct sockaddr_in
*)sa
)->sin_port
= htons((in_port_t
)port32
);
6001 /* get uaddr string and convert to sockaddr */
6002 nfsm_chain_get_32(error
, &nmrep
, ualen
);
6004 if (ualen
> (sizeof(uaddr
) - 1)) {
6008 /* program is not available, just return a zero port */
6009 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
6011 ((struct sockaddr_in6
*)saddr
)->sin6_port
= htons(0);
6013 ((struct sockaddr_un
*)saddr
)->sun_path
[0] = '\0';
6015 NFS_SOCK_DBG("Program %u version %u unavailable", protocol
, vers
);
6017 nfsm_chain_get_opaque(error
, &nmrep
, ualen
, uaddr
);
6018 NFS_SOCK_DBG("Got uaddr %s\n", uaddr
);
6020 uaddr
[ualen
] = '\0';
6022 scopeid
= ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
;
6024 if (!nfs_uaddr2sockaddr(uaddr
, saddr
)) {
6027 if (ip
== 6 && scopeid
!= ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
) {
6028 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
, scopeid
);
6029 ((struct sockaddr_in6
*)saddr
)->sin6_scope_id
= scopeid
;
6034 if ((error
== EPROGMISMATCH
) || (error
== EPROCUNAVAIL
) || (error
== EIO
) || (error
== EBADRPC
)) {
6035 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6036 if (pmvers
== RPCBVERS4
) {
6037 /* fall back to v3 and GETADDR */
6039 pmproc
= RPCBPROC_GETADDR
;
6040 nfsm_chain_cleanup(&nmreq
);
6041 nfsm_chain_cleanup(&nmrep
);
6042 bcopy(sa
, saddr
, min(sizeof(ss
), sa
->sa_len
));
6049 bcopy(saddr
, sa
, min(saddr
->sa_len
, sa
->sa_len
));
6053 nfsm_chain_cleanup(&nmreq
);
6054 nfsm_chain_cleanup(&nmrep
);
6055 NFS_SOCK_DBG("Returned %d\n", error
);
6061 nfs_msg(thread_t thd
,
6066 proc_t p
= thd
? get_bsdthreadtask_info(thd
) : NULL
;
6070 tpr
= tprintf_open(p
);
6075 tprintf(tpr
, "nfs server %s: %s, error %d\n", server
, msg
, error
);
6077 tprintf(tpr
, "nfs server %s: %s\n", server
, msg
);
6083 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
6084 #define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */
6085 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
6086 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
6087 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
6089 uint32_t nfs_squishy_flags
= NFS_SQUISH_MOBILE_ONLY
| NFS_SQUISH_AUTOMOUNTED_ONLY
| NFS_SQUISH_QUICK
;
6090 uint32_t nfs_tcp_sockbuf
= 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
6091 int32_t nfs_is_mobile
;
6093 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
6094 #define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/
6097 * Could this mount be squished?
6100 nfs_can_squish(struct nfsmount
*nmp
)
6102 uint64_t flags
= vfs_flags(nmp
->nm_mountp
);
6103 int softsquish
= ((nfs_squishy_flags
& NFS_SQUISH_SOFT
) & NMFLAG(nmp
, SOFT
));
6105 if (!softsquish
&& (nfs_squishy_flags
& NFS_SQUISH_MOBILE_ONLY
) && nfs_is_mobile
== 0) {
6109 if ((nfs_squishy_flags
& NFS_SQUISH_AUTOMOUNTED_ONLY
) && (flags
& MNT_AUTOMOUNTED
) == 0) {
6117 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6118 * the mount may become "not responding". It's desirable to be able
6119 * to unmount these dead mounts, but only if there is no risk of
6120 * losing data or crashing applications. A "squishy" NFS mount is one
6121 * that can be force unmounted with little risk of harm.
6123 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6124 * in a squishy state iff it is allowed to be squishy and there are no
6125 * dirty pages and there are no mmapped files and there are no files
6126 * open for write. Mounts are allowed to be squishy is controlled by
6127 * the settings of the nfs_squishy_flags and its mobility state. These
6128 * flags can be set by sysctls.
6130 * If nfs_is_squishy determines that we are in a squishy state we will
6131 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6132 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6133 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6134 * update the current dead timeout with the mount's nm_deadtimeout
6135 * value set at mount time.
6137 * Assumes that nm_lock is held.
6139 * Note this routine is racey, but its effects on setting the
6140 * dead timeout only have effects when we're in trouble and are likely
6141 * to stay that way. Since by default its only for automounted
6142 * volumes on mobile machines; this is a reasonable trade off between
6143 * data integrity and user experience. It can be disabled or set via
6148 nfs_is_squishy(struct nfsmount
*nmp
)
6150 mount_t mp
= nmp
->nm_mountp
;
6152 int timeo
= (nfs_squishy_flags
& NFS_SQUISH_QUICK
) ? NFS_SQUISHY_QUICKTIMEOUT
: NFS_SQUISHY_DEADTIMEOUT
;
6154 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
6155 vfs_statfs(mp
)->f_mntfromname
, nmp
->nm_curdeadtimeout
, nfs_is_mobile
);
6157 if (!nfs_can_squish(nmp
)) {
6161 timeo
= (nmp
->nm_deadtimeout
> timeo
) ? max(nmp
->nm_deadtimeout
/ 8, timeo
) : timeo
;
6162 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp
->nm_writers
, nmp
->nm_mappers
, timeo
);
6164 if (nmp
->nm_writers
== 0 && nmp
->nm_mappers
== 0) {
6165 uint64_t flags
= mp
? vfs_flags(mp
) : 0;
6169 * Walk the nfs nodes and check for dirty buffers it we're not
6170 * RDONLY and we've not already been declared as squishy since
6171 * this can be a bit expensive.
6173 if (!(flags
& MNT_RDONLY
) && !(nmp
->nm_state
& NFSSTA_SQUISHY
)) {
6174 squishy
= !nfs_mount_is_dirty(mp
);
6180 nmp
->nm_state
|= NFSSTA_SQUISHY
;
6182 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
6185 nmp
->nm_curdeadtimeout
= squishy
? timeo
: nmp
->nm_deadtimeout
;
6187 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp
->nm_curdeadtimeout
);
6193 * On a send operation, if we can't reach the server and we've got only one server to talk to
6194 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
6195 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6198 nfs_is_dead(int error
, struct nfsmount
*nmp
)
6202 lck_mtx_lock(&nmp
->nm_lock
);
6203 if (nmp
->nm_state
& NFSSTA_DEAD
) {
6204 lck_mtx_unlock(&nmp
->nm_lock
);
6208 if ((error
!= ENETUNREACH
&& error
!= EHOSTUNREACH
&& error
!= EADDRNOTAVAIL
) ||
6209 !(nmp
->nm_locations
.nl_numlocs
== 1 && nmp
->nm_locations
.nl_locations
[0]->nl_servcount
== 1)) {
6210 lck_mtx_unlock(&nmp
->nm_lock
);
6214 if ((nfs_squishy_flags
& NFS_SQUISH_QUICK
) && nfs_is_squishy(nmp
)) {
6215 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
);
6216 fsid
= vfs_statfs(nmp
->nm_mountp
)->f_fsid
;
6217 lck_mtx_unlock(&nmp
->nm_lock
);
6218 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
6219 vfs_event_signal(&fsid
, VQ_DEAD
, 0);
6222 lck_mtx_unlock(&nmp
->nm_lock
);
6227 * If we've experienced timeouts and we're not really a
6228 * classic hard mount, then just return cached data to
6229 * the caller instead of likely hanging on an RPC.
6232 nfs_use_cache(struct nfsmount
*nmp
)
6235 *%%% We always let mobile users goto the cache,
6236 * perhaps we should not even require them to have
6239 int cache_ok
= (nfs_is_mobile
|| NMFLAG(nmp
, SOFT
) ||
6240 nfs_can_squish(nmp
) || nmp
->nm_deadtimeout
);
6242 int timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6245 * So if we have a timeout and we're not really a hard hard-mount,
6246 * return 1 to not get things out of the cache.
6249 return (nmp
->nm_state
& timeoutmask
) && cache_ok
;
6253 * Log a message that nfs or lockd server is unresponsive. Check if we
6254 * can be squished and if we can, or that our dead timeout has
6255 * expired, and we're not holding state, set our mount as dead, remove
6256 * our mount state and ask to be unmounted. If we are holding state
6257 * we're being called from the nfs_request_timer and will soon detect
6258 * that we need to unmount.
6261 nfs_down(struct nfsmount
*nmp
, thread_t thd
, int error
, int flags
, const char *msg
, int holding_state
)
6263 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
6264 uint32_t do_vfs_signal
= 0;
6267 if (nfs_mount_gone(nmp
)) {
6271 lck_mtx_lock(&nmp
->nm_lock
);
6273 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6274 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6275 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6277 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6279 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6280 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6282 if ((flags
& NFSSTA_TIMEO
) && !(nmp
->nm_state
& NFSSTA_TIMEO
)) {
6283 nmp
->nm_state
|= NFSSTA_TIMEO
;
6285 if ((flags
& NFSSTA_LOCKTIMEO
) && !(nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6286 nmp
->nm_state
|= NFSSTA_LOCKTIMEO
;
6288 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && !(nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6289 nmp
->nm_state
|= NFSSTA_JUKEBOXTIMEO
;
6292 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6294 nfs_is_squishy(nmp
);
6296 if (unresponsive
&& (nmp
->nm_curdeadtimeout
> 0)) {
6298 if (!wasunresponsive
) {
6299 nmp
->nm_deadto_start
= now
.tv_sec
;
6300 nfs_mount_sock_thread_wake(nmp
);
6301 } else if ((now
.tv_sec
- nmp
->nm_deadto_start
) > nmp
->nm_curdeadtimeout
&& !holding_state
) {
6302 if (!(nmp
->nm_state
& NFSSTA_DEAD
)) {
6303 printf("nfs server %s: %sdead\n", vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
,
6304 (nmp
->nm_curdeadtimeout
!= nmp
->nm_deadtimeout
) ? "squished " : "");
6306 do_vfs_signal
= VQ_DEAD
;
6309 lck_mtx_unlock(&nmp
->nm_lock
);
6311 if (do_vfs_signal
== VQ_DEAD
&& !(nmp
->nm_state
& NFSSTA_DEAD
)) {
6312 nfs_mount_zombie(nmp
, NFSSTA_DEAD
);
6313 } else if (softnobrowse
|| wasunresponsive
|| !unresponsive
) {
6316 do_vfs_signal
= VQ_NOTRESP
;
6318 if (do_vfs_signal
) {
6319 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, do_vfs_signal
, 0);
6322 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, error
);
6326 nfs_up(struct nfsmount
*nmp
, thread_t thd
, int flags
, const char *msg
)
6328 int timeoutmask
, wasunresponsive
, unresponsive
, softnobrowse
;
6331 if (nfs_mount_gone(nmp
)) {
6336 nfs_msg(thd
, vfs_statfs(nmp
->nm_mountp
)->f_mntfromname
, msg
, 0);
6339 lck_mtx_lock(&nmp
->nm_lock
);
6341 timeoutmask
= NFSSTA_TIMEO
| NFSSTA_LOCKTIMEO
| NFSSTA_JUKEBOXTIMEO
;
6342 if (NMFLAG(nmp
, MUTEJUKEBOX
)) { /* jukebox timeouts don't count as unresponsive if muted */
6343 timeoutmask
&= ~NFSSTA_JUKEBOXTIMEO
;
6345 wasunresponsive
= (nmp
->nm_state
& timeoutmask
);
6347 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6348 softnobrowse
= (NMFLAG(nmp
, SOFT
) && (vfs_flags(nmp
->nm_mountp
) & MNT_DONTBROWSE
));
6350 if ((flags
& NFSSTA_TIMEO
) && (nmp
->nm_state
& NFSSTA_TIMEO
)) {
6351 nmp
->nm_state
&= ~NFSSTA_TIMEO
;
6353 if ((flags
& NFSSTA_LOCKTIMEO
) && (nmp
->nm_state
& NFSSTA_LOCKTIMEO
)) {
6354 nmp
->nm_state
&= ~NFSSTA_LOCKTIMEO
;
6356 if ((flags
& NFSSTA_JUKEBOXTIMEO
) && (nmp
->nm_state
& NFSSTA_JUKEBOXTIMEO
)) {
6357 nmp
->nm_state
&= ~NFSSTA_JUKEBOXTIMEO
;
6360 unresponsive
= (nmp
->nm_state
& timeoutmask
);
6362 nmp
->nm_deadto_start
= 0;
6363 nmp
->nm_curdeadtimeout
= nmp
->nm_deadtimeout
;
6364 nmp
->nm_state
&= ~NFSSTA_SQUISHY
;
6365 lck_mtx_unlock(&nmp
->nm_lock
);
6370 do_vfs_signal
= (wasunresponsive
&& !unresponsive
);
6372 if (do_vfs_signal
) {
6373 vfs_event_signal(&vfs_statfs(nmp
->nm_mountp
)->f_fsid
, VQ_NOTRESP
, 1);
6378 #endif /* CONFIG_NFS_CLIENT */
6380 #if CONFIG_NFS_SERVER
6383 * Generate the rpc reply header
6384 * siz arg. is used to decide if adding a cluster is worthwhile
6388 struct nfsrv_descript
*nd
,
6389 __unused
struct nfsrv_sock
*slp
,
6390 struct nfsm_chain
*nmrepp
,
6395 struct nfsm_chain nmrep
;
6398 err
= nd
->nd_repstat
;
6399 if (err
&& (nd
->nd_vers
== NFS_VER2
)) {
6404 * If this is a big reply, use a cluster else
6405 * try and leave leading space for the lower level headers.
6407 siz
+= RPC_REPLYSIZ
;
6408 if (siz
>= nfs_mbuf_minclsize
) {
6409 error
= mbuf_getpacket(MBUF_WAITOK
, &mrep
);
6411 error
= mbuf_gethdr(MBUF_WAITOK
, MBUF_TYPE_DATA
, &mrep
);
6414 /* unable to allocate packet */
6415 /* XXX should we keep statistics for these errors? */
6418 if (siz
< nfs_mbuf_minclsize
) {
6419 /* leave space for lower level headers */
6420 tl
= mbuf_data(mrep
);
6421 tl
+= 80 / sizeof(*tl
); /* XXX max_hdr? XXX */
6422 mbuf_setdata(mrep
, tl
, 6 * NFSX_UNSIGNED
);
6424 nfsm_chain_init(&nmrep
, mrep
);
6425 nfsm_chain_add_32(error
, &nmrep
, nd
->nd_retxid
);
6426 nfsm_chain_add_32(error
, &nmrep
, RPC_REPLY
);
6427 if (err
== ERPCMISMATCH
|| (err
& NFSERR_AUTHERR
)) {
6428 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGDENIED
);
6429 if (err
& NFSERR_AUTHERR
) {
6430 nfsm_chain_add_32(error
, &nmrep
, RPC_AUTHERR
);
6431 nfsm_chain_add_32(error
, &nmrep
, (err
& ~NFSERR_AUTHERR
));
6433 nfsm_chain_add_32(error
, &nmrep
, RPC_MISMATCH
);
6434 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6435 nfsm_chain_add_32(error
, &nmrep
, RPC_VER2
);
6439 nfsm_chain_add_32(error
, &nmrep
, RPC_MSGACCEPTED
);
6440 if (nd
->nd_gss_context
!= NULL
) {
6441 /* RPCSEC_GSS verifier */
6442 error
= nfs_gss_svc_verf_put(nd
, &nmrep
);
6444 nfsm_chain_add_32(error
, &nmrep
, RPC_SYSTEM_ERR
);
6448 /* RPCAUTH_NULL verifier */
6449 nfsm_chain_add_32(error
, &nmrep
, RPCAUTH_NULL
);
6450 nfsm_chain_add_32(error
, &nmrep
, 0);
6452 /* accepted status */
6455 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGUNAVAIL
);
6458 nfsm_chain_add_32(error
, &nmrep
, RPC_PROGMISMATCH
);
6459 /* XXX hard coded versions? */
6460 nfsm_chain_add_32(error
, &nmrep
, NFS_VER2
);
6461 nfsm_chain_add_32(error
, &nmrep
, NFS_VER3
);
6464 nfsm_chain_add_32(error
, &nmrep
, RPC_PROCUNAVAIL
);
6467 nfsm_chain_add_32(error
, &nmrep
, RPC_GARBAGE
);
6470 nfsm_chain_add_32(error
, &nmrep
, RPC_SUCCESS
);
6471 if (nd
->nd_gss_context
!= NULL
) {
6472 error
= nfs_gss_svc_prepare_reply(nd
, &nmrep
);
6474 if (err
!= NFSERR_RETVOID
) {
6475 nfsm_chain_add_32(error
, &nmrep
,
6476 (err
? nfsrv_errmap(nd
, err
) : 0));
6483 nfsm_chain_build_done(error
, &nmrep
);
6485 /* error composing reply header */
6486 /* XXX should we keep statistics for these errors? */
6492 if ((err
!= 0) && (err
!= NFSERR_RETVOID
)) {
6493 OSAddAtomic64(1, &nfsstats
.srvrpc_errs
);
6499 * The nfs server send routine.
6501 * - return EINTR or ERESTART if interrupted by a signal
6502 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6503 * - do any cleanup required by recoverable socket errors (???)
6506 nfsrv_send(struct nfsrv_sock
*slp
, mbuf_t nam
, mbuf_t top
)
6509 socket_t so
= slp
->ns_so
;
6510 struct sockaddr
*sendnam
;
6513 bzero(&msg
, sizeof(msg
));
6514 if (nam
&& !sock_isconnected(so
) && (slp
->ns_sotype
!= SOCK_STREAM
)) {
6515 if ((sendnam
= mbuf_data(nam
))) {
6516 msg
.msg_name
= (caddr_t
)sendnam
;
6517 msg
.msg_namelen
= sendnam
->sa_len
;
6520 if (NFS_IS_DBG(NFS_FAC_SRV
, 15)) {
6521 nfs_dump_mbuf(__func__
, __LINE__
, "nfsrv_send\n", top
);
6523 error
= sock_sendmbuf(so
, &msg
, top
, 0, NULL
);
6527 log(LOG_INFO
, "nfsd send error %d\n", error
);
6529 if ((error
== EWOULDBLOCK
) && (slp
->ns_sotype
== SOCK_STREAM
)) {
6530 error
= EPIPE
; /* zap TCP sockets if they time out on send */
6532 /* Handle any recoverable (soft) socket errors here. (???) */
6533 if (error
!= EINTR
&& error
!= ERESTART
&& error
!= EIO
&&
6534 error
!= EWOULDBLOCK
&& error
!= EPIPE
) {
6542 * Socket upcall routine for the nfsd sockets.
6543 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6544 * Essentially do as much as possible non-blocking, else punt and it will
6545 * be called with MBUF_WAITOK from an nfsd.
6548 nfsrv_rcv(socket_t so
, void *arg
, int waitflag
)
6550 struct nfsrv_sock
*slp
= arg
;
6552 if (!nfsd_thread_count
|| !(slp
->ns_flag
& SLP_VALID
)) {
6556 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
6557 nfsrv_rcv_locked(so
, slp
, waitflag
);
6558 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6561 nfsrv_rcv_locked(socket_t so
, struct nfsrv_sock
*slp
, int waitflag
)
6563 mbuf_t m
, mp
, mhck
, m2
;
6564 int ns_flag
= 0, error
;
6568 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
6569 if (waitflag
== MBUF_DONTWAIT
) {
6570 lck_rw_done(&slp
->ns_rwlock
);
6577 * Define this to test for nfsds handling this under heavy load.
6579 if (waitflag
== MBUF_DONTWAIT
) {
6580 ns_flag
= SLP_NEEDQ
;
6584 if (slp
->ns_sotype
== SOCK_STREAM
) {
6586 * If there are already records on the queue, defer soreceive()
6587 * to an(other) nfsd so that there is feedback to the TCP layer that
6588 * the nfs servers are heavily loaded.
6591 ns_flag
= SLP_NEEDQ
;
6598 bytes_read
= 1000000000;
6599 error
= sock_receivembuf(so
, NULL
, &mp
, MSG_DONTWAIT
, &bytes_read
);
6600 if (error
|| mp
== NULL
) {
6601 if (error
== EWOULDBLOCK
) {
6602 ns_flag
= (waitflag
== MBUF_DONTWAIT
) ? SLP_NEEDQ
: 0;
6604 ns_flag
= SLP_DISCONN
;
6609 if (slp
->ns_rawend
) {
6610 if ((error
= mbuf_setnext(slp
->ns_rawend
, m
))) {
6611 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error
);
6613 slp
->ns_cc
+= bytes_read
;
6616 slp
->ns_cc
= bytes_read
;
6618 while ((m2
= mbuf_next(m
))) {
6624 * Now try and parse record(s) out of the raw stream data.
6626 error
= nfsrv_getstream(slp
, waitflag
);
6628 if (error
== EPERM
) {
6629 ns_flag
= SLP_DISCONN
;
6631 ns_flag
= SLP_NEEDQ
;
6635 struct sockaddr_storage nam
;
6637 if (slp
->ns_reccnt
>= nfsrv_sock_max_rec_queue_length
) {
6638 /* already have max # RPC records queued on this socket */
6639 ns_flag
= SLP_NEEDQ
;
6643 bzero(&msg
, sizeof(msg
));
6644 msg
.msg_name
= (caddr_t
)&nam
;
6645 msg
.msg_namelen
= sizeof(nam
);
6648 bytes_read
= 1000000000;
6649 error
= sock_receivembuf(so
, &msg
, &mp
, MSG_DONTWAIT
| MSG_NEEDSA
, &bytes_read
);
6651 if (msg
.msg_name
&& (mbuf_get(MBUF_WAITOK
, MBUF_TYPE_SONAME
, &mhck
) == 0)) {
6652 mbuf_setlen(mhck
, nam
.ss_len
);
6653 bcopy(&nam
, mbuf_data(mhck
), nam
.ss_len
);
6655 if (mbuf_setnext(m
, mp
)) {
6656 /* trouble... just drop it */
6657 printf("nfsrv_rcv: mbuf_setnext failed\n");
6664 if (slp
->ns_recend
) {
6665 mbuf_setnextpkt(slp
->ns_recend
, m
);
6668 slp
->ns_flag
|= SLP_DOREC
;
6671 mbuf_setnextpkt(m
, NULL
);
6678 * Now try and process the request records, non-blocking.
6682 slp
->ns_flag
|= ns_flag
;
6684 if (waitflag
== MBUF_DONTWAIT
) {
6685 int wake
= (slp
->ns_flag
& SLP_WORKTODO
);
6686 lck_rw_done(&slp
->ns_rwlock
);
6687 if (wake
&& nfsd_thread_count
) {
6688 lck_mtx_lock(nfsd_mutex
);
6689 nfsrv_wakenfsd(slp
);
6690 lck_mtx_unlock(nfsd_mutex
);
6696 * Try and extract an RPC request from the mbuf data list received on a
6697 * stream socket. The "waitflag" argument indicates whether or not it
6701 nfsrv_getstream(struct nfsrv_sock
*slp
, int waitflag
)
6704 char *cp1
, *cp2
, *mdata
;
6707 mbuf_t om
, m2
, recm
;
6710 if (slp
->ns_flag
& SLP_GETSTREAM
) {
6711 panic("nfs getstream");
6713 slp
->ns_flag
|= SLP_GETSTREAM
;
6715 if (slp
->ns_reclen
== 0) {
6716 if (slp
->ns_cc
< NFSX_UNSIGNED
) {
6717 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6721 mdata
= mbuf_data(m
);
6723 if (mlen
>= NFSX_UNSIGNED
) {
6724 bcopy(mdata
, (caddr_t
)&recmark
, NFSX_UNSIGNED
);
6725 mdata
+= NFSX_UNSIGNED
;
6726 mlen
-= NFSX_UNSIGNED
;
6727 mbuf_setdata(m
, mdata
, mlen
);
6729 cp1
= (caddr_t
)&recmark
;
6731 while (cp1
< ((caddr_t
)&recmark
) + NFSX_UNSIGNED
) {
6739 mbuf_setdata(m
, cp2
, mlen
);
6742 slp
->ns_cc
-= NFSX_UNSIGNED
;
6743 recmark
= ntohl(recmark
);
6744 slp
->ns_reclen
= recmark
& ~0x80000000;
6745 if (recmark
& 0x80000000) {
6746 slp
->ns_flag
|= SLP_LASTFRAG
;
6748 slp
->ns_flag
&= ~SLP_LASTFRAG
;
6750 if (slp
->ns_reclen
<= 0 || slp
->ns_reclen
> NFS_MAXPACKET
) {
6751 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6757 * Now get the record part.
6759 * Note that slp->ns_reclen may be 0. Linux sometimes
6760 * generates 0-length RPCs
6763 if (slp
->ns_cc
== slp
->ns_reclen
) {
6765 slp
->ns_raw
= slp
->ns_rawend
= NULL
;
6766 slp
->ns_cc
= slp
->ns_reclen
= 0;
6767 } else if (slp
->ns_cc
> slp
->ns_reclen
) {
6771 mdata
= mbuf_data(m
);
6773 while (len
< slp
->ns_reclen
) {
6774 if ((len
+ mlen
) > slp
->ns_reclen
) {
6775 if (mbuf_copym(m
, 0, slp
->ns_reclen
- len
, waitflag
, &m2
)) {
6776 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6780 if (mbuf_setnext(om
, m2
)) {
6781 /* trouble... just drop it */
6782 printf("nfsrv_getstream: mbuf_setnext failed\n");
6784 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6791 mdata
+= slp
->ns_reclen
- len
;
6792 mlen
-= slp
->ns_reclen
- len
;
6793 mbuf_setdata(m
, mdata
, mlen
);
6794 len
= slp
->ns_reclen
;
6795 } else if ((len
+ mlen
) == slp
->ns_reclen
) {
6800 if (mbuf_setnext(om
, NULL
)) {
6801 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
6802 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6806 mdata
= mbuf_data(m
);
6812 mdata
= mbuf_data(m
);
6819 slp
->ns_flag
&= ~SLP_GETSTREAM
;
6824 * Accumulate the fragments into a record.
6826 if (slp
->ns_frag
== NULL
) {
6827 slp
->ns_frag
= recm
;
6830 while ((m2
= mbuf_next(m
))) {
6833 if ((error
= mbuf_setnext(m
, recm
))) {
6834 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error
);
6837 if (slp
->ns_flag
& SLP_LASTFRAG
) {
6838 if (slp
->ns_recend
) {
6839 mbuf_setnextpkt(slp
->ns_recend
, slp
->ns_frag
);
6841 slp
->ns_rec
= slp
->ns_frag
;
6842 slp
->ns_flag
|= SLP_DOREC
;
6844 slp
->ns_recend
= slp
->ns_frag
;
6845 slp
->ns_frag
= NULL
;
6851 * Parse an RPC header.
6855 struct nfsrv_sock
*slp
,
6857 struct nfsrv_descript
**ndp
)
6861 struct nfsrv_descript
*nd
;
6865 if (!(slp
->ns_flag
& (SLP_VALID
| SLP_DOREC
)) || (slp
->ns_rec
== NULL
)) {
6868 nd
= zalloc(nfsrv_descript_zone
);
6870 slp
->ns_rec
= mbuf_nextpkt(m
);
6872 mbuf_setnextpkt(m
, NULL
);
6874 slp
->ns_flag
&= ~SLP_DOREC
;
6875 slp
->ns_recend
= NULL
;
6878 if (mbuf_type(m
) == MBUF_TYPE_SONAME
) {
6881 if ((error
= mbuf_setnext(nam
, NULL
))) {
6882 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error
);
6888 nfsm_chain_dissect_init(error
, &nd
->nd_nmreq
, m
);
6890 error
= nfsrv_getreq(nd
);
6896 if (nd
->nd_gss_context
) {
6897 nfs_gss_svc_ctx_deref(nd
->nd_gss_context
);
6899 NFS_ZFREE(nfsrv_descript_zone
, nd
);
6909 * Parse an RPC request
6911 * - fill in the cred struct.
6914 nfsrv_getreq(struct nfsrv_descript
*nd
)
6916 struct nfsm_chain
*nmreq
;
6918 u_int32_t nfsvers
, auth_type
;
6926 nd
->nd_gss_context
= NULL
;
6927 nd
->nd_gss_seqnum
= 0;
6928 nd
->nd_gss_mb
= NULL
;
6930 user_id
= group_id
= -2;
6931 val
= auth_type
= len
= 0;
6933 nmreq
= &nd
->nd_nmreq
;
6934 nfsm_chain_get_32(error
, nmreq
, nd
->nd_retxid
); // XID
6935 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Call
6936 if (!error
&& (val
!= RPC_CALL
)) {
6941 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Version
6943 if (val
!= RPC_VER2
) {
6944 nd
->nd_repstat
= ERPCMISMATCH
;
6945 nd
->nd_procnum
= NFSPROC_NOOP
;
6948 nfsm_chain_get_32(error
, nmreq
, val
); // RPC Program Number
6950 if (val
!= NFS_PROG
) {
6951 nd
->nd_repstat
= EPROGUNAVAIL
;
6952 nd
->nd_procnum
= NFSPROC_NOOP
;
6955 nfsm_chain_get_32(error
, nmreq
, nfsvers
);// NFS Version Number
6957 if ((nfsvers
< NFS_VER2
) || (nfsvers
> NFS_VER3
)) {
6958 nd
->nd_repstat
= EPROGMISMATCH
;
6959 nd
->nd_procnum
= NFSPROC_NOOP
;
6962 nd
->nd_vers
= nfsvers
;
6963 nfsm_chain_get_32(error
, nmreq
, nd
->nd_procnum
);// NFS Procedure Number
6965 if ((nd
->nd_procnum
>= NFS_NPROCS
) ||
6966 ((nd
->nd_vers
== NFS_VER2
) && (nd
->nd_procnum
> NFSV2PROC_STATFS
))) {
6967 nd
->nd_repstat
= EPROCUNAVAIL
;
6968 nd
->nd_procnum
= NFSPROC_NOOP
;
6971 if (nfsvers
!= NFS_VER3
) {
6972 nd
->nd_procnum
= nfsv3_procid
[nd
->nd_procnum
];
6974 nfsm_chain_get_32(error
, nmreq
, auth_type
); // Auth Flavor
6975 nfsm_chain_get_32(error
, nmreq
, len
); // Auth Length
6976 if (!error
&& (len
< 0 || len
> RPCAUTH_MAXSIZ
)) {
6981 /* Handle authentication */
6982 if (auth_type
== RPCAUTH_SYS
) {
6983 struct posix_cred temp_pcred
;
6984 if (nd
->nd_procnum
== NFSPROC_NULL
) {
6987 nd
->nd_sec
= RPCAUTH_SYS
;
6988 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // skip stamp
6989 nfsm_chain_get_32(error
, nmreq
, len
); // hostname length
6990 if (len
< 0 || len
> NFS_MAXNAMLEN
) {
6993 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
)); // skip hostname
6996 /* create a temporary credential using the bits from the wire */
6997 bzero(&temp_pcred
, sizeof(temp_pcred
));
6998 nfsm_chain_get_32(error
, nmreq
, user_id
);
6999 nfsm_chain_get_32(error
, nmreq
, group_id
);
7000 temp_pcred
.cr_groups
[0] = group_id
;
7001 nfsm_chain_get_32(error
, nmreq
, len
); // extra GID count
7002 if ((len
< 0) || (len
> RPCAUTH_UNIXGIDS
)) {
7006 for (i
= 1; i
<= len
; i
++) {
7008 nfsm_chain_get_32(error
, nmreq
, temp_pcred
.cr_groups
[i
]);
7010 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
);
7014 ngroups
= (len
>= NGROUPS
) ? NGROUPS
: (short)(len
+ 1);
7016 nfsrv_group_sort(&temp_pcred
.cr_groups
[0], ngroups
);
7018 nfsm_chain_adv(error
, nmreq
, NFSX_UNSIGNED
); // verifier flavor (should be AUTH_NONE)
7019 nfsm_chain_get_32(error
, nmreq
, len
); // verifier length
7020 if (len
< 0 || len
> RPCAUTH_MAXSIZ
) {
7024 nfsm_chain_adv(error
, nmreq
, nfsm_rndup(len
));
7027 /* request creation of a real credential */
7028 temp_pcred
.cr_uid
= user_id
;
7029 temp_pcred
.cr_ngroups
= ngroups
;
7030 nd
->nd_cr
= posix_cred_create(&temp_pcred
);
7031 if (nd
->nd_cr
== NULL
) {
7032 nd
->nd_repstat
= ENOMEM
;
7033 nd
->nd_procnum
= NFSPROC_NOOP
;
7036 } else if (auth_type
== RPCSEC_GSS
) {
7037 error
= nfs_gss_svc_cred_get(nd
, nmreq
);
7039 if (error
== EINVAL
) {
7040 goto nfsmout
; // drop the request
7042 nd
->nd_repstat
= error
;
7043 nd
->nd_procnum
= NFSPROC_NOOP
;
7047 if (nd
->nd_procnum
== NFSPROC_NULL
) { // assume it's AUTH_NONE
7050 nd
->nd_repstat
= (NFSERR_AUTHERR
| AUTH_REJECTCRED
);
7051 nd
->nd_procnum
= NFSPROC_NOOP
;
7056 if (IS_VALID_CRED(nd
->nd_cr
)) {
7057 kauth_cred_unref(&nd
->nd_cr
);
7059 nfsm_chain_cleanup(nmreq
);
7064 * Search for a sleeping nfsd and wake it up.
7065 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7066 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7067 * Note: Must be called with nfsd_mutex held.
7070 nfsrv_wakenfsd(struct nfsrv_sock
*slp
)
7074 if ((slp
->ns_flag
& SLP_VALID
) == 0) {
7078 lck_rw_lock_exclusive(&slp
->ns_rwlock
);
7079 /* if there's work to do on this socket, make sure it's queued up */
7080 if ((slp
->ns_flag
& SLP_WORKTODO
) && !(slp
->ns_flag
& SLP_QUEUED
)) {
7081 TAILQ_INSERT_TAIL(&nfsrv_sockwait
, slp
, ns_svcq
);
7082 slp
->ns_flag
|= SLP_WAITQ
;
7084 lck_rw_done(&slp
->ns_rwlock
);
7086 /* wake up a waiting nfsd, if possible */
7087 nd
= TAILQ_FIRST(&nfsd_queue
);
7092 TAILQ_REMOVE(&nfsd_queue
, nd
, nfsd_queue
);
7093 nd
->nfsd_flag
&= ~NFSD_WAITING
;
7097 #endif /* CONFIG_NFS_SERVER */
7099 #endif /* CONFIG_NFS */