]> git.saurik.com Git - apple/xnu.git/blame - bsd/nfs/nfs_socket.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / bsd / nfs / nfs_socket.c
CommitLineData
1c79356b 1/*
cb323159 2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68/*
69 * Socket operations for use by nfs
70 */
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/proc.h>
6d2010ae 75#include <sys/signalvar.h>
91447636
A
76#include <sys/kauth.h>
77#include <sys/mount_internal.h>
1c79356b 78#include <sys/kernel.h>
91447636 79#include <sys/kpi_mbuf.h>
1c79356b
A
80#include <sys/malloc.h>
81#include <sys/vnode.h>
82#include <sys/domain.h>
83#include <sys/protosw.h>
84#include <sys/socket.h>
cb323159 85#include <sys/un.h>
1c79356b
A
86#include <sys/syslog.h>
87#include <sys/tprintf.h>
91447636 88#include <libkern/OSAtomic.h>
1c79356b
A
89
90#include <sys/time.h>
91#include <kern/clock.h>
4a249263
A
92#include <kern/task.h>
93#include <kern/thread.h>
2d21ac55 94#include <kern/thread_call.h>
9bccf70c 95#include <sys/user.h>
6d2010ae 96#include <sys/acct.h>
1c79356b
A
97
98#include <netinet/in.h>
99#include <netinet/tcp.h>
100
101#include <nfs/rpcv2.h>
6d2010ae 102#include <nfs/krpc.h>
1c79356b
A
103#include <nfs/nfsproto.h>
104#include <nfs/nfs.h>
105#include <nfs/xdr_subs.h>
106#include <nfs/nfsm_subs.h>
2d21ac55 107#include <nfs/nfs_gss.h>
1c79356b
A
108#include <nfs/nfsmount.h>
109#include <nfs/nfsnode.h>
1c79356b 110
39236c6e 111#define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
cb323159 112#define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
39236c6e 113
2d21ac55 114/* XXX */
0a7de745
A
115boolean_t current_thread_aborted(void);
116kern_return_t thread_terminate(thread_t);
2d21ac55
A
117
118
119#if NFSSERVER
120int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
121
0a7de745 122int nfsrv_getstream(struct nfsrv_sock *, int);
b0d623f7 123int nfsrv_getreq(struct nfsrv_descript *);
2d21ac55
A
124extern int nfsv3_procid[NFS_NPROCS];
125#endif /* NFSSERVER */
126
6d2010ae
A
127/*
128 * compare two sockaddr structures
129 */
130int
131nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
132{
0a7de745
A
133 if (!sa1) {
134 return -1;
135 }
136 if (!sa2) {
137 return 1;
138 }
139 if (sa1->sa_family != sa2->sa_family) {
140 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
141 }
142 if (sa1->sa_len != sa2->sa_len) {
143 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
144 }
145 if (sa1->sa_family == AF_INET) {
146 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
147 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
148 }
149 if (sa1->sa_family == AF_INET6) {
150 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
151 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
152 }
153 return -1;
6d2010ae
A
154}
155
2d21ac55
A
156#if NFSCLIENT
157
0a7de745
A
158int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
159int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
160int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
161void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
162void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
163int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
164int nfs_reconnect(struct nfsmount *);
165int nfs_connect_setup(struct nfsmount *);
166void nfs_mount_sock_thread(void *, wait_result_t);
167void nfs_udp_rcv(socket_t, void*, int);
168void nfs_tcp_rcv(socket_t, void*, int);
169void nfs_sock_poke(struct nfsmount *);
170void nfs_request_match_reply(struct nfsmount *, mbuf_t);
171void nfs_reqdequeue(struct nfsreq *);
172void nfs_reqbusy(struct nfsreq *);
b0d623f7 173struct nfsreq *nfs_reqnext(struct nfsreq *);
0a7de745
A
174int nfs_wait_reply(struct nfsreq *);
175void nfs_softterm(struct nfsreq *);
176int nfs_can_squish(struct nfsmount *);
177int nfs_is_squishy(struct nfsmount *);
178int nfs_is_dead(int, struct nfsmount *);
fa4905b1 179
1c79356b
A
180/*
181 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
182 * Use the mean and mean deviation of rtt for the appropriate type of rpc
183 * for the frequent rpcs and a default for the others.
184 * The justification for doing "other" this way is that these rpcs
185 * happen so infrequently that timer est. would probably be stale.
186 * Also, since many of these rpcs are
187 * non-idempotent, a conservative timeout is desired.
188 * getattr, lookup - A+2D
189 * read, write - A+4D
190 * other - nm_timeo
191 */
0a7de745 192#define NFS_RTO(n, t) \
1c79356b
A
193 ((t) == 0 ? (n)->nm_timeo : \
194 ((t) < 3 ? \
195 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
196 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
0a7de745
A
197#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
198#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
1c79356b
A
199
200/*
201 * Defines which timer to use for the procnum.
202 * 0 - default
203 * 1 - getattr
204 * 2 - lookup
205 * 3 - read
206 * 4 - write
207 */
cb323159
A
/*
 * Per-procedure RTT timer class, indexed by NFS procedure number
 * (C99 designated initializers; classes are those listed above:
 * 0 = default/nm_timeo, 1 = getattr, 2 = lookup, 3 = read, 4 = write).
 * Consumed by NFS_RTO/NFS_SRTT/NFS_SDRTT.
 */
static const int proct[] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,          /* getattr timer */
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,           /* lookup timer */
	[NFSPROC_ACCESS] = 1,           /* access behaves like getattr */
	[NFSPROC_READLINK] = 3,
	[NFSPROC_READ] = 3,             /* read timer */
	[NFSPROC_WRITE] = 4,            /* write timer */
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,          /* directory reads use the read timer */
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};
233
234/*
235 * There is a congestion window for outstanding rpcs maintained per mount
236 * point. The cwnd size is adjusted in roughly the way that:
237 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
238 * SIGCOMM '88". ACM, August 1988.
239 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
240 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
241 * of rpcs is in progress.
242 * (The sent count and cwnd are scaled for integer arith.)
243 * Variants of "slow start" were tried and were found to be too much of a
244 * performance hit (ave. rtt 3 times larger),
245 * I suspect due to the large rtt that nfs rpcs have.
246 */
0a7de745
A
247#define NFS_CWNDSCALE 256
248#define NFS_MAXCWND (NFS_CWNDSCALE * 32)
1c79356b 249static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
4a249263 250
1c79356b 251/*
6d2010ae 252 * Increment location index to next address/server/location.
1c79356b 253 */
6d2010ae
A
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	/* Work on local copies; nlip is only updated on success. */
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		/* wrapped all the way around without finding an address; leave *nlip unchanged */
		return;
	}
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		goto next_server;
	}

	/* commit the advanced index */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
295
296/*
297 * Compare two location indices.
298 */
299int
300nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
301{
0a7de745
A
302 if (nlip1->nli_loc != nlip2->nli_loc) {
303 return nlip1->nli_loc - nlip2->nli_loc;
304 }
305 if (nlip1->nli_serv != nlip2->nli_serv) {
306 return nlip1->nli_serv - nlip2->nli_serv;
307 }
308 return nlip1->nli_addr - nlip2->nli_addr;
6d2010ae
A
309}
310
311/*
312 * Get the mntfromname (or path portion only) for a given location.
313 */
314void
315nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly)
316{
317 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
318 char *p;
319 int cnt, i;
320
321 p = s;
322 if (!pathonly) {
cb323159
A
323 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
324 if (name == NULL) {
325 name = "";
326 }
327 if (*name == '\0') {
328 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
329 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
330 }
331 cnt = snprintf(p, size, "<%s>:", name);
332 } else {
333 cnt = snprintf(p, size, "%s:", name);
334 }
6d2010ae
A
335 p += cnt;
336 size -= cnt;
337 }
338 if (fsl->nl_path.np_compcount == 0) {
339 /* mounting root export on server */
340 if (size > 0) {
341 *p++ = '/';
342 *p++ = '\0';
1c79356b 343 }
6d2010ae
A
344 return;
345 }
346 /* append each server path component */
0a7de745 347 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
6d2010ae
A
348 cnt = snprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
349 p += cnt;
350 size -= cnt;
351 }
352}
2d21ac55 353
6d2010ae
A
354/*
355 * NFS client connect socket upcall.
356 * (Used only during socket connect/search.)
357 */
358void
359nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
360{
361 struct nfs_socket *nso = arg;
362 size_t rcvlen;
363 mbuf_t m;
364 int error = 0, recv = 1;
365
366 if (nso->nso_flags & NSO_CONNECTING) {
cb323159 367 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
6d2010ae
A
368 wakeup(nso->nso_wake);
369 return;
370 }
371
372 lck_mtx_lock(&nso->nso_lock);
0a7de745 373 if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
39236c6e 374 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
6d2010ae
A
375 lck_mtx_unlock(&nso->nso_lock);
376 return;
377 }
cb323159 378 NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
6d2010ae
A
379 nso->nso_flags |= NSO_UPCALL;
380
381 /* loop while we make error-free progress */
382 while (!error && recv) {
383 /* make sure we're still interested in this socket */
0a7de745 384 if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
6d2010ae 385 break;
0a7de745 386 }
6d2010ae
A
387 lck_mtx_unlock(&nso->nso_lock);
388 m = NULL;
389 if (nso->nso_sotype == SOCK_STREAM) {
390 error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
cb323159 391 NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
6d2010ae
A
392 } else {
393 rcvlen = 1000000;
394 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
395 recv = m ? 1 : 0;
396 }
397 lck_mtx_lock(&nso->nso_lock);
398 if (m) {
399 /* match response with request */
400 struct nfsm_chain nmrep;
401 uint32_t reply = 0, rxid = 0, verf_type, verf_len;
402 uint32_t reply_status, rejected_status, accepted_status;
403
cb323159 404 NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
6d2010ae
A
405 nfsm_chain_dissect_init(error, &nmrep, m);
406 nfsm_chain_get_32(error, &nmrep, rxid);
407 nfsm_chain_get_32(error, &nmrep, reply);
0a7de745 408 if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
6d2010ae 409 error = EBADRPC;
0a7de745 410 }
6d2010ae
A
411 nfsm_chain_get_32(error, &nmrep, reply_status);
412 if (!error && (reply_status == RPC_MSGDENIED)) {
413 nfsm_chain_get_32(error, &nmrep, rejected_status);
0a7de745 414 if (!error) {
6d2010ae 415 error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
0a7de745 416 }
2d21ac55 417 }
6d2010ae
A
418 nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
419 nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
420 nfsmout_if(error);
0a7de745 421 if (verf_len) {
6d2010ae 422 nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
0a7de745 423 }
6d2010ae
A
424 nfsm_chain_get_32(error, &nmrep, accepted_status);
425 nfsmout_if(error);
cb323159 426 NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
6d2010ae
A
427 if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
428 uint32_t minvers, maxvers;
429 nfsm_chain_get_32(error, &nmrep, minvers);
430 nfsm_chain_get_32(error, &nmrep, maxvers);
431 nfsmout_if(error);
432 if (nso->nso_protocol == PMAPPROG) {
0a7de745 433 if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
6d2010ae 434 error = EPROGMISMATCH;
0a7de745
A
435 } else if ((nso->nso_saddr->sa_family == AF_INET) &&
436 (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
6d2010ae 437 nso->nso_version = PMAPVERS;
0a7de745
A
438 } else if (nso->nso_saddr->sa_family == AF_INET6) {
439 if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
6d2010ae 440 nso->nso_version = RPCBVERS4;
0a7de745 441 } else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
6d2010ae 442 nso->nso_version = RPCBVERS3;
0a7de745 443 }
6d2010ae
A
444 }
445 } else if (nso->nso_protocol == NFS_PROG) {
3e170ce0
A
446 int vers;
447
448 /*
449 * N.B. Both portmapper and rpcbind V3 are happy to return
450 * addresses for other versions than the one you ask (getport or
451 * getaddr) and thus we may have fallen to this code path. So if
452 * we get a version that we support, use highest supported
453 * version. This assumes that the server supports all versions
454 * between minvers and maxvers. Note for IPv6 we will try and
455 * use rpcbind V4 which has getversaddr and we should not get
456 * here if that was successful.
457 */
458 for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
0a7de745
A
459 if (vers >= (int)minvers && vers <= (int)maxvers) {
460 break;
461 }
3e170ce0
A
462 }
463 nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
6d2010ae 464 }
0a7de745 465 if (!error && nso->nso_version) {
6d2010ae 466 accepted_status = RPC_SUCCESS;
0a7de745 467 }
1c79356b 468 }
6d2010ae
A
469 if (!error) {
470 switch (accepted_status) {
471 case RPC_SUCCESS:
472 error = 0;
473 break;
474 case RPC_PROGUNAVAIL:
475 error = EPROGUNAVAIL;
476 break;
477 case RPC_PROGMISMATCH:
478 error = EPROGMISMATCH;
479 break;
480 case RPC_PROCUNAVAIL:
481 error = EPROCUNAVAIL;
482 break;
483 case RPC_GARBAGE:
484 error = EBADRPC;
485 break;
486 case RPC_SYSTEM_ERR:
487 default:
488 error = EIO;
489 break;
490 }
491 }
492nfsmout:
493 nso->nso_flags &= ~NSO_PINGING;
494 if (error) {
cb323159
A
495 NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
496 nso->nso_protocol, nso->nso_version, error);
6d2010ae
A
497 nso->nso_error = error;
498 nso->nso_flags |= NSO_DEAD;
499 } else {
500 nso->nso_flags |= NSO_VERIFIED;
501 }
502 mbuf_freem(m);
503 /* wake up search thread */
504 wakeup(nso->nso_wake);
505 break;
2d21ac55 506 }
6d2010ae
A
507 }
508
509 nso->nso_flags &= ~NSO_UPCALL;
510 if ((error != EWOULDBLOCK) && (error || !recv)) {
511 /* problems with the socket... */
cb323159 512 NFS_SOCK_DBG("connect upcall failed %d\n", error);
6d2010ae
A
513 nso->nso_error = error ? error : EPIPE;
514 nso->nso_flags |= NSO_DEAD;
515 wakeup(nso->nso_wake);
516 }
0a7de745 517 if (nso->nso_flags & NSO_DISCONNECTING) {
6d2010ae 518 wakeup(&nso->nso_flags);
0a7de745 519 }
6d2010ae
A
520 lck_mtx_unlock(&nso->nso_lock);
521}
522
523/*
524 * Create/initialize an nfs_socket structure.
525 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,            /* server address (AF_INET/AF_INET6/AF_LOCAL) */
	int sotype,                     /* SOCK_STREAM or SOCK_DGRAM */
	in_port_t port,                 /* port to set in our copy of sa (IPv4/IPv6 only) */
	uint32_t protocol,              /* RPC program number */
	uint32_t vers,                  /* RPC program version (0 = not yet negotiated) */
	int resvport,                   /* nonzero: bind to a reserved (low) port */
	struct nfs_socket **nsop)       /* OUT: new socket, only set on success */
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	/* NOTE(review): unconditional define makes the #ifdef below always true */
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* printable form of the address, for debug logging only */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		} else {
			sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		}
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO);
	if (nso) {
		MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO);
	}
	if (!nso || !nso->nso_saddr) {
		if (nso) {
			FREE(nso, M_TEMP);
		}
		return ENOMEM;
	}
	lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	/* keep our own copy of the address, with the requested port patched in */
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		/* local (unix-domain) addresses carry no port */
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		/* destroy also closes the socket if it was created */
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
647
648/*
649 * Destroy an nfs_socket structure.
650 */
651void
652nfs_socket_destroy(struct nfs_socket *nso)
653{
cb323159 654 struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };
6d2010ae 655
cb323159 656 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error);
6d2010ae
A
657 lck_mtx_lock(&nso->nso_lock);
658 nso->nso_flags |= NSO_DISCONNECTING;
0a7de745
A
659 if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
660 msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
661 }
6d2010ae
A
662 lck_mtx_unlock(&nso->nso_lock);
663 sock_shutdown(nso->nso_so, SHUT_RDWR);
664 sock_close(nso->nso_so);
0a7de745 665 if (nso->nso_sotype == SOCK_STREAM) {
6d2010ae 666 nfs_rpc_record_state_cleanup(&nso->nso_rrs);
0a7de745 667 }
6d2010ae 668 lck_mtx_destroy(&nso->nso_lock, nfs_request_grp);
0a7de745 669 if (nso->nso_saddr) {
6d2010ae 670 FREE(nso->nso_saddr, M_SONAME);
0a7de745
A
671 }
672 if (nso->nso_saddr2) {
6d2010ae 673 FREE(nso->nso_saddr2, M_SONAME);
0a7de745 674 }
39236c6e 675 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
6d2010ae
A
676 FREE(nso, M_TEMP);
677}
678
679/*
680 * Set common socket options on an nfs_socket.
681 */
void
nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
{
	/*
	 * Set socket send/receive timeouts
	 * - Receive timeout shouldn't matter because most receives are performed
	 *   in the socket upcall non-blocking.
	 * - Send timeout should allow us to react to a blocked socket.
	 *   Soft mounts will want to abort sooner.
	 */
	struct timeval timeo;
	int on = 1, proto;

	timeo.tv_usec = 0;
	/* soft/squishy mounts get a short (5s) send timeout; others 60s */
	timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (nso->nso_sotype == SOCK_STREAM) {
		/* Assume that SOCK_STREAM always requires a connection */
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		/* set nodelay for TCP */
		sock_gettype(nso->nso_so, NULL, NULL, &proto);
		if (proto == IPPROTO_TCP) {
			sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
		}
	}
	/* set socket buffer sizes for UDP (and larger ones for local sockets) */
	if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */
		int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024);
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
		sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
	}
	/* set SO_NOADDRERR to detect network changes ASAP */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	/* just playin' it safe with upcalls */
	sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	/* socket should be interruptible if the mount is */
	if (!NMFLAG(nmp, INTR)) {
		sock_nointerrupt(nso->nso_so, 1);
	}
}
722
723/*
724 * Release resources held in an nfs_socket_search.
725 */
726void
727nfs_socket_search_cleanup(struct nfs_socket_search *nss)
728{
729 struct nfs_socket *nso, *nsonext;
730
731 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
732 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
733 nss->nss_sockcnt--;
734 nfs_socket_destroy(nso);
735 }
736 if (nss->nss_sock) {
737 nfs_socket_destroy(nss->nss_sock);
738 nss->nss_sock = NULL;
739 }
740}
741
742/*
743 * Prefer returning certain errors over others.
744 * This function returns a ranking of the given error.
745 */
746int
747nfs_connect_error_class(int error)
748{
749 switch (error) {
750 case 0:
0a7de745 751 return 0;
6d2010ae
A
752 case ETIMEDOUT:
753 case EAGAIN:
0a7de745 754 return 1;
6d2010ae
A
755 case EPIPE:
756 case EADDRNOTAVAIL:
757 case ENETDOWN:
758 case ENETUNREACH:
759 case ENETRESET:
760 case ECONNABORTED:
761 case ECONNRESET:
762 case EISCONN:
763 case ENOTCONN:
764 case ESHUTDOWN:
765 case ECONNREFUSED:
766 case EHOSTDOWN:
767 case EHOSTUNREACH:
0a7de745 768 return 2;
6d2010ae
A
769 case ERPCMISMATCH:
770 case EPROCUNAVAIL:
771 case EPROGMISMATCH:
772 case EPROGUNAVAIL:
0a7de745 773 return 3;
6d2010ae 774 case EBADRPC:
0a7de745 775 return 4;
6d2010ae 776 default:
0a7de745 777 return 5;
6d2010ae
A
778 }
779}
780
781/*
782 * Make sure a socket search returns the best error.
783 */
784void
785nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
786{
0a7de745 787 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
6d2010ae 788 nss->nss_error = error;
0a7de745 789 }
6d2010ae
A
790}
791
39236c6e 792/* nfs_connect_search_new_socket:
0a7de745 793 * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
39236c6e
A
794 * by nss.
795 *
796 * nss_last is set to -1 at initialization to indicate the first time. Its set to -2 if address was found but
797 * could not be used or if a socket timed out.
6d2010ae
A
798 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 * we have no sockets or
	 * the last address failed and did not produce a socket (nss_last < 0) or
	 * Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 * then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		/* abandon the search if the mount is being torn down */
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			/* unparseable address: skip it; nss_last = -2 keeps the loop trying */
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			/* couldn't set the upcall; mark the socket dead but still queue it for reaping */
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		/* record when we last produced a socket (throttles further creation) */
		nss->nss_last = now->tv_sec;
	}

	/* out of addresses with nothing produced: stamp the time so callers can time out */
	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
894
/*
 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
 * If successful, set the socket options for the socket as required by the mount.
 *
 * Assumes: nso->nso_lock is held on entry and return (it is dropped and
 * reacquired internally around the blocking sock_connect() call).
 *
 * Returns 1 if the socket is connected and set up, 0 otherwise (still
 * connecting, or marked NSO_DEAD with nso_error set).
 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	int error;

	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1;        /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* drop the lock: sock_connect() may block even with MSG_DONTWAIT */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		/* EINPROGRESS is expected for a non-blocking connect; anything else is fatal */
		if (error && (error != EINPROGRESS)) {
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1;        /* Socket is connected and setup */
		} else {
			/* not connected yet; see if the connect attempt failed (SO_ERROR) */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0;        /* Waiting to be connected */
}
961
/*
 * nfs_connect_search_ping: Send a NULL procedure RPC on the nso socket to
 * probe whether the server answers at the expected program/version.
 *
 * Assumes nso->nso_lock is held on entry and return; it is dropped and
 * reacquired around the RPC header build and the actual send.
 *
 * Returns 1 if the ping was sent (NSO_PINGING set, reply awaited by the
 * upcall), 0 on failure (socket marked NSO_DEAD with nso_error set).
 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	if (!vers) {
		/* no version negotiated yet: ping with the protocol's default */
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	/* drop the lock while building the RPC header (may allocate) */
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		nso->nso_flags |= NSO_PINGING;
		/* remember the xid so the upcall can match the reply */
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* unconnected datagram sockets need an explicit destination */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		/* drop the lock around the (possibly blocking) send */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* a short send is treated as a timeout */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1018
1019/*
1020 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
0a7de745 1021 * Set the nfs socket protocol and version if needed.
39236c6e
A
1022 */
1023void
3e170ce0 1024nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
39236c6e
A
1025{
1026 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
0a7de745 1027 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
39236c6e
A
1028 if (!nso->nso_version) {
1029 /* If the version isn't set, the default must have worked. */
0a7de745 1030 if (nso->nso_protocol == PMAPPROG) {
39236c6e 1031 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
0a7de745
A
1032 }
1033 if (nso->nso_protocol == NFS_PROG) {
3e170ce0 1034 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
0a7de745 1035 }
39236c6e
A
1036 }
1037 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1038 nss->nss_sockcnt--;
1039 nss->nss_sock = nso;
1040}
1041
/*
 * nfs_connect_search_socket_reap: For each socket in the search list, mark any
 * timed out socket as dead and remove it from the list.  Dead sockets are then
 * destroyed.
 *
 * NOTE: nmp is tagged __unused because it is only referenced inside
 * NFS_SOCK_DBG(), which may compile away to nothing in non-debug builds.
 */
void
nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_socket *nso, *nsonext;

	/* _SAFE variant required: entries may be unlinked/destroyed mid-walk */
	TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
		lck_mtx_lock(&nso->nso_lock);
		if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
			/* took too long */
			NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_error = ETIMEDOUT;
			nso->nso_flags |= NSO_DEAD;
		}
		if (!(nso->nso_flags & NSO_DEAD)) {
			/* still viable; leave it on the list */
			lck_mtx_unlock(&nso->nso_lock);
			continue;
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
		/* remember the socket's error for the overall search result */
		nfs_socket_search_update_error(nss, nso->nso_error);
		TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt--;
		nfs_socket_destroy(nso);
		/* If there are more sockets to try, force the starting of another socket */
		if (nss->nss_addrcnt > 0) {
			nss->nss_last = -2;
		}
	}
}
1077
1078/*
1079 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1080 */
1081int
1082nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
1083{
1084 int error;
1085
1086 /* log a warning if connect is taking a while */
0a7de745 1087 if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
39236c6e
A
1088 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1089 nss->nss_flags |= NSS_WARNED;
1090 }
0a7de745
A
1091 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
1092 return EINTR;
1093 }
1094 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
1095 return error;
1096 }
39236c6e
A
1097
1098 /* If we were succesfull at sending a ping, wait up to a second for a reply */
0a7de745 1099 if (nss->nss_last >= 0) {
39236c6e 1100 tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
0a7de745
A
1101 }
1102
1103 return 0;
39236c6e
A
1104}
1105
6d2010ae 1106
39236c6e
A
1107/*
1108 * Continue the socket search until we have something to report.
1109 */
1110int
1111nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
1112{
1113 struct nfs_socket *nso;
1114 struct timeval now;
1115 int error;
1116 int verbose = (nss->nss_flags & NSS_VERBOSE);
0a7de745 1117
39236c6e
A
1118loop:
1119 microuptime(&now);
1120 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);
1121
1122 /* add a new socket to the socket list if needed and available */
1123 error = nfs_connect_search_new_socket(nmp, nss, &now);
1124 if (error) {
1125 NFS_SOCK_DBG("nfs connect returned %d\n", error);
0a7de745 1126 return error;
39236c6e 1127 }
0a7de745 1128
39236c6e
A
1129 /* check each active socket on the list and try to push it along */
1130 TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
1131 lck_mtx_lock(&nso->nso_lock);
1132
1133 /* If not connected connect it */
1134 if (!(nso->nso_flags & NSO_CONNECTED)) {
1135 if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
1136 lck_mtx_unlock(&nso->nso_lock);
1137 continue;
1138 }
1139 }
1140
1141 /* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
0a7de745
A
1142 if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
1143 ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
39236c6e
A
1144 if (!nfs_connect_search_ping(nmp, nso, &now)) {
1145 lck_mtx_unlock(&nso->nso_lock);
1146 continue;
1147 }
1148 }
1149
1150 /* Has the socket been verified by the up call routine? */
1151 if (nso->nso_flags & NSO_VERIFIED) {
1152 /* WOOHOO!! This socket looks good! */
1153 nfs_connect_search_socket_found(nmp, nss, nso);
1154 lck_mtx_unlock(&nso->nso_lock);
1155 break;
1156 }
1157 lck_mtx_unlock(&nso->nso_lock);
1158 }
0a7de745 1159
39236c6e
A
1160 /* Check for timed out sockets and mark as dead and then remove all dead sockets. */
1161 nfs_connect_search_socket_reap(nmp, nss, &now);
0a7de745 1162
6d2010ae
A
1163 /*
1164 * Keep looping if we haven't found a socket yet and we have more
1165 * sockets to (continue to) try.
1166 */
1167 error = 0;
39236c6e
A
1168 if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
1169 error = nfs_connect_search_check(nmp, nss, &now);
0a7de745 1170 if (!error) {
39236c6e 1171 goto loop;
0a7de745 1172 }
6d2010ae
A
1173 }
1174
39236c6e 1175 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
0a7de745 1176 return error;
6d2010ae
A
1177}
1178
1179/*
1180 * Initialize a new NFS connection.
1181 *
1182 * Search for a location to connect a socket to and initialize the connection.
1183 *
1184 * An NFS mount may have multiple locations/servers/addresses available.
1185 * We attempt to connect to each one asynchronously and will start
1186 * several sockets in parallel if other locations are slow to answer.
1187 * We'll use the first NFS socket we can successfully set up.
1188 *
1189 * The search may involve contacting the portmapper service first.
1190 *
1191 * A mount's initial connection may require negotiating some parameters such
1192 * as socket type and NFS version.
1193 */
3e170ce0 1194
6d2010ae
A
1195int
1196nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1197{
1198 struct nfs_socket_search nss;
1199 struct nfs_socket *nso, *nsonfs;
1200 struct sockaddr_storage ss;
1201 struct sockaddr *saddr, *oldsaddr;
1202 sock_upcall upcall;
cb323159
A
1203#if CONFIG_NFS4
1204 struct timeval now;
1205#endif
1206 struct timeval start;
6d2010ae 1207 int error, savederror, nfsvers;
3e170ce0 1208 int tryv4 = 1;
0a7de745 1209 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
6d2010ae
A
1210 fhandle_t *fh = NULL;
1211 char *path = NULL;
1212 in_port_t port;
39236c6e 1213 int addrtotal = 0;
0a7de745 1214
6d2010ae
A
1215 /* paranoia... check that we have at least one address in the locations */
1216 uint32_t loc, serv;
0a7de745
A
1217 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1218 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
39236c6e 1219 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
0a7de745 1220 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
39236c6e 1221 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
0a7de745
A
1222 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1223 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1224 }
6d2010ae 1225 }
6d2010ae 1226 }
39236c6e
A
1227
1228 if (addrtotal == 0) {
1229 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
0a7de745
A
1230 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1231 return EINVAL;
1232 } else {
39236c6e 1233 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
0a7de745
A
1234 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1235 }
6d2010ae
A
1236
1237 lck_mtx_lock(&nmp->nm_lock);
1238 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1239 nmp->nm_nss = &nss;
1240 lck_mtx_unlock(&nmp->nm_lock);
1241 microuptime(&start);
1242 savederror = error = 0;
1243
1244tryagain:
1245 /* initialize socket search state */
1246 bzero(&nss, sizeof(nss));
39236c6e 1247 nss.nss_addrcnt = addrtotal;
6d2010ae
A
1248 nss.nss_error = savederror;
1249 TAILQ_INIT(&nss.nss_socklist);
1250 nss.nss_sotype = sotype;
1251 nss.nss_startloc = nmp->nm_locations.nl_current;
1252 nss.nss_timestamp = start.tv_sec;
1253 nss.nss_timeo = timeo;
0a7de745 1254 if (verbose) {
6d2010ae 1255 nss.nss_flags |= NSS_VERBOSE;
0a7de745 1256 }
6d2010ae
A
1257
1258 /* First time connecting, we may need to negotiate some things */
1259 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
cb323159
A
1260 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1261 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1262 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
6d2010ae
A
1263 if (!nmp->nm_vers) {
1264 /* No NFS version specified... */
1265 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
cb323159 1266#if CONFIG_NFS4
3e170ce0
A
1267 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1268 nss.nss_port = NFS_PORT;
1269 nss.nss_protocol = NFS_PROG;
1270 nss.nss_version = 4;
1271 nss.nss_flags |= NSS_FALLBACK2PMAP;
1272 } else {
cb323159
A
1273#endif
1274 /* ...connect to portmapper first if we (may) need any ports. */
1275 nss.nss_port = PMAPPORT;
1276 nss.nss_protocol = PMAPPROG;
1277 nss.nss_version = 0;
1278#if CONFIG_NFS4
1279 }
1280#endif
6d2010ae
A
1281 } else {
1282 /* ...connect to NFS port first. */
1283 nss.nss_port = nmp->nm_nfsport;
1284 nss.nss_protocol = NFS_PROG;
1285 nss.nss_version = 0;
1286 }
cb323159 1287#if CONFIG_NFS4
6d2010ae 1288 } else if (nmp->nm_vers >= NFS_VER4) {
3e170ce0
A
1289 if (tryv4) {
1290 /* For NFSv4, we use the given (or default) port. */
1291 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1292 nss.nss_protocol = NFS_PROG;
1293 nss.nss_version = 4;
1294 /*
1295 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1296 * if no port is specified on the mount;
1297 * Note nm_vers is set so we will only try NFS_VER4.
1298 */
0a7de745 1299 if (!nmp->nm_nfsport) {
3e170ce0 1300 nss.nss_flags |= NSS_FALLBACK2PMAP;
0a7de745 1301 }
3e170ce0
A
1302 } else {
1303 nss.nss_port = PMAPPORT;
1304 nss.nss_protocol = PMAPPROG;
1305 nss.nss_version = 0;
1306 }
cb323159 1307#endif
6d2010ae
A
1308 } else {
1309 /* For NFSv3/v2... */
1310 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1311 /* ...connect to portmapper first if we need any ports. */
1312 nss.nss_port = PMAPPORT;
1313 nss.nss_protocol = PMAPPROG;
1314 nss.nss_version = 0;
1315 } else {
1316 /* ...connect to NFS port first. */
1317 nss.nss_port = nmp->nm_nfsport;
1318 nss.nss_protocol = NFS_PROG;
1319 nss.nss_version = nmp->nm_vers;
1320 }
1321 }
39236c6e 1322 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
0a7de745
A
1323 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1324 nss.nss_protocol, nss.nss_version);
6d2010ae
A
1325 } else {
1326 /* we've connected before, just connect to NFS port */
1327 if (!nmp->nm_nfsport) {
1328 /* need to ask portmapper which port that would be */
1329 nss.nss_port = PMAPPORT;
1330 nss.nss_protocol = PMAPPROG;
1331 nss.nss_version = 0;
1332 } else {
1333 nss.nss_port = nmp->nm_nfsport;
1334 nss.nss_protocol = NFS_PROG;
1335 nss.nss_version = nmp->nm_vers;
1336 }
39236c6e 1337 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
0a7de745
A
1338 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1339 nss.nss_protocol, nss.nss_version);
6d2010ae
A
1340 }
1341
1342 /* Set next location to first valid location. */
1343 /* If start location is invalid, find next location. */
1344 nss.nss_nextloc = nss.nss_startloc;
1345 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1346 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1347 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1348 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
39236c6e 1349 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
0a7de745
A
1350 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1351 return ENOENT;
6d2010ae
A
1352 }
1353 }
1354 nss.nss_last = -1;
1355
1356keepsearching:
1357
1358 error = nfs_connect_search_loop(nmp, &nss);
1359 if (error || !nss.nss_sock) {
1360 /* search failed */
1361 nfs_socket_search_cleanup(&nss);
3e170ce0
A
1362 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1363 tryv4 = 0;
1364 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
0a7de745 1365 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
3e170ce0
A
1366 goto tryagain;
1367 }
1368
6d2010ae
A
1369 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1370 /* Try using UDP */
1371 sotype = SOCK_DGRAM;
1372 savederror = nss.nss_error;
39236c6e 1373 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
0a7de745 1374 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
6d2010ae
A
1375 goto tryagain;
1376 }
0a7de745 1377 if (!error) {
6d2010ae 1378 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
0a7de745 1379 }
6d2010ae
A
1380 lck_mtx_lock(&nmp->nm_lock);
1381 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1382 nmp->nm_nss = NULL;
1383 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 1384 if (nss.nss_flags & NSS_WARNED) {
6d2010ae 1385 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
0a7de745
A
1386 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1387 }
1388 if (fh) {
6d2010ae 1389 FREE(fh, M_TEMP);
0a7de745
A
1390 }
1391 if (path) {
6d2010ae 1392 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
0a7de745 1393 }
39236c6e 1394 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
0a7de745
A
1395 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1396 return error;
6d2010ae
A
1397 }
1398
1399 /* try to use nss_sock */
1400 nso = nss.nss_sock;
1401 nss.nss_sock = NULL;
1402
1403 /* We may be speaking to portmap first... to determine port(s). */
0a7de745 1404 if (nso->nso_saddr->sa_family == AF_INET) {
6d2010ae 1405 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
cb323159 1406 } else if (nso->nso_saddr->sa_family == AF_INET6) {
6d2010ae 1407 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
cb323159
A
1408 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1409 if (nso->nso_protocol == PMAPPROG) {
1410 port = PMAPPORT;
1411 }
0a7de745 1412 }
cb323159 1413
6d2010ae
A
1414 if (port == PMAPPORT) {
1415 /* Use this portmapper port to get the port #s we need. */
39236c6e 1416 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
0a7de745 1417 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
6d2010ae
A
1418
1419 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1420 sock_setupcall(nso->nso_so, NULL, NULL);
1421
1422 /* Set up socket address and port for NFS socket. */
1423 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1424
3e170ce0
A
1425 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1426 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
6d2010ae 1427 if (!(port = nmp->nm_nfsport)) {
0a7de745 1428 if (ss.ss_family == AF_INET) {
6d2010ae 1429 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
0a7de745 1430 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1431 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
cb323159
A
1432 } else if (ss.ss_family == AF_LOCAL) {
1433 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1434 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1435 }
0a7de745 1436 }
3e170ce0 1437 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
0a7de745 1438 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
3e170ce0 1439 continue; /* Wrong version */
0a7de745 1440 }
cb323159 1441#if CONFIG_NFS4
0a7de745 1442 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
3e170ce0 1443 continue; /* NFSv4 does not do UDP */
0a7de745 1444 }
cb323159
A
1445#endif
1446 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1447 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1448 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1449 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1450 error = 0;
1451 } else {
1452 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1453 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1454 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1455 }
6d2010ae 1456 if (!error) {
0a7de745 1457 if (ss.ss_family == AF_INET) {
6d2010ae 1458 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
0a7de745 1459 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1460 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
cb323159
A
1461 } else if (ss.ss_family == AF_LOCAL) {
1462 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
0a7de745
A
1463 }
1464 if (!port) {
6d2010ae 1465 error = EPROGUNAVAIL;
0a7de745 1466 }
cb323159 1467#if CONFIG_NFS4
0a7de745 1468 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
3e170ce0 1469 continue; /* We already tried this */
0a7de745 1470 }
cb323159 1471#endif
6d2010ae 1472 }
0a7de745 1473 if (!error) {
3e170ce0 1474 break;
0a7de745 1475 }
6d2010ae 1476 }
0a7de745 1477 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
3e170ce0 1478 error = EPROGUNAVAIL;
0a7de745 1479 }
6d2010ae
A
1480 if (error) {
1481 nfs_socket_search_update_error(&nss, error);
1482 nfs_socket_destroy(nso);
cb323159 1483 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
6d2010ae
A
1484 goto keepsearching;
1485 }
cb323159
A
1486 } else if (nmp->nm_nfs_localport) {
1487 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1488 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
6d2010ae 1489 }
cb323159 1490
6d2010ae 1491 /* Create NFS protocol socket and add it to the list of sockets. */
3e170ce0 1492 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
cb323159
A
1493 if (ss.ss_family == AF_LOCAL) {
1494 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1495 }
6d2010ae 1496 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
0a7de745 1497 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
6d2010ae
A
1498 if (error) {
1499 nfs_socket_search_update_error(&nss, error);
1500 nfs_socket_destroy(nso);
cb323159 1501 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
6d2010ae
A
1502 goto keepsearching;
1503 }
1504 nsonfs->nso_location = nso->nso_location;
1505 nsonfs->nso_wake = &nss;
1506 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1507 if (error) {
1508 nfs_socket_search_update_error(&nss, error);
1509 nfs_socket_destroy(nsonfs);
1510 nfs_socket_destroy(nso);
cb323159 1511 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
6d2010ae
A
1512 goto keepsearching;
1513 }
1514 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1515 nss.nss_sockcnt++;
1516 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1517 /* Set up socket address and port for MOUNT socket. */
2d21ac55 1518 error = 0;
6d2010ae
A
1519 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1520 port = nmp->nm_mountport;
cb323159 1521 NFS_SOCK_DBG("mount port = %d\n", port);
0a7de745 1522 if (ss.ss_family == AF_INET) {
6d2010ae 1523 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
0a7de745 1524 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1525 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
cb323159
A
1526 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1527 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1528 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
0a7de745 1529 }
6d2010ae
A
1530 if (!port) {
1531 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1532 /* If NFS version is unknown, optimistically choose for NFSv3. */
1533 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1534 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
cb323159 1535 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
6d2010ae 1536 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
cb323159 1537 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
6d2010ae
A
1538 }
1539 if (!error) {
0a7de745 1540 if (ss.ss_family == AF_INET) {
6d2010ae 1541 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
0a7de745 1542 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1543 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
cb323159
A
1544 } else if (ss.ss_family == AF_LOCAL) {
1545 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
0a7de745
A
1546 }
1547 if (!port) {
6d2010ae 1548 error = EPROGUNAVAIL;
0a7de745 1549 }
6d2010ae
A
1550 }
1551 /* create sockaddr for MOUNT */
0a7de745
A
1552 if (!error) {
1553 MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO);
1554 }
1555 if (!error && !nsonfs->nso_saddr2) {
6d2010ae 1556 error = ENOMEM;
0a7de745
A
1557 }
1558 if (!error) {
6d2010ae 1559 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
0a7de745 1560 }
6d2010ae 1561 if (error) {
cb323159 1562 NFS_SOCK_DBG("Could not create mount sockaet address %d", error);
6d2010ae
A
1563 lck_mtx_lock(&nsonfs->nso_lock);
1564 nsonfs->nso_error = error;
1565 nsonfs->nso_flags |= NSO_DEAD;
1566 lck_mtx_unlock(&nsonfs->nso_lock);
1567 }
2d21ac55 1568 }
cb323159 1569 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
6d2010ae
A
1570 nfs_socket_destroy(nso);
1571 goto keepsearching;
91447636 1572 }
2d21ac55 1573
6d2010ae 1574 /* nso is an NFS socket */
39236c6e 1575 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
6d2010ae
A
1576
1577 /* If NFS version wasn't specified, it was determined during the connect. */
1578 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1579
1580 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1581 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1582 error = 0;
1583 saddr = nso->nso_saddr2;
1584 if (!saddr) {
1585 /* Need sockaddr for MOUNT port */
cb323159 1586 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
6d2010ae
A
1587 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1588 port = nmp->nm_mountport;
0a7de745 1589 if (ss.ss_family == AF_INET) {
6d2010ae 1590 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
0a7de745 1591 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1592 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
cb323159
A
1593 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1594 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1595 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
0a7de745 1596 }
6d2010ae
A
1597 if (!port) {
1598 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1599 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
cb323159 1600 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
6d2010ae 1601 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
cb323159 1602 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
0a7de745 1603 if (ss.ss_family == AF_INET) {
6d2010ae 1604 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
0a7de745 1605 } else if (ss.ss_family == AF_INET6) {
6d2010ae 1606 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
0a7de745 1607 }
6d2010ae
A
1608 }
1609 if (!error) {
0a7de745 1610 if (port) {
6d2010ae 1611 saddr = (struct sockaddr*)&ss;
0a7de745 1612 } else {
6d2010ae 1613 error = EPROGUNAVAIL;
0a7de745 1614 }
6d2010ae
A
1615 }
1616 }
0a7de745
A
1617 if (saddr) {
1618 MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO);
1619 }
1620 if (saddr && fh) {
316670eb 1621 MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
0a7de745 1622 }
6d2010ae 1623 if (!saddr || !fh || !path) {
0a7de745 1624 if (!error) {
6d2010ae 1625 error = ENOMEM;
0a7de745
A
1626 }
1627 if (fh) {
6d2010ae 1628 FREE(fh, M_TEMP);
0a7de745
A
1629 }
1630 if (path) {
6d2010ae 1631 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
0a7de745 1632 }
6d2010ae
A
1633 fh = NULL;
1634 path = NULL;
1635 nfs_socket_search_update_error(&nss, error);
1636 nfs_socket_destroy(nso);
1637 goto keepsearching;
1638 }
1639 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1640 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
0a7de745 1641 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
39236c6e 1642 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
0a7de745 1643 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
6d2010ae
A
1644 if (!error) {
1645 /* Make sure we can agree on a security flavor. */
1646 int o, s; /* indices into mount option and server security flavor lists */
1647 int found = 0;
1648
1649 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1650 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1651 nmp->nm_servsec.count = 1;
1652 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1653 }
1654 if (nmp->nm_sec.count) {
1655 /* Choose the first flavor in our list that the server supports. */
1656 if (!nmp->nm_servsec.count) {
1657 /* we don't know what the server supports, just use our first choice */
1658 nmp->nm_auth = nmp->nm_sec.flavors[0];
1659 found = 1;
1660 }
0a7de745
A
1661 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1662 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
6d2010ae
A
1663 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1664 nmp->nm_auth = nmp->nm_sec.flavors[o];
1665 found = 1;
1666 }
0a7de745
A
1667 }
1668 }
6d2010ae
A
1669 } else {
1670 /* Choose the first one we support from the server's list. */
1671 if (!nmp->nm_servsec.count) {
1672 nmp->nm_auth = RPCAUTH_SYS;
1673 found = 1;
1674 }
0a7de745 1675 for (s = 0; s < nmp->nm_servsec.count; s++) {
6d2010ae
A
1676 switch (nmp->nm_servsec.flavors[s]) {
1677 case RPCAUTH_SYS:
1678 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
0a7de745 1679 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
6d2010ae 1680 found = 0;
0a7de745 1681 }
6d2010ae
A
1682 case RPCAUTH_NONE:
1683 case RPCAUTH_KRB5:
1684 case RPCAUTH_KRB5I:
1685 case RPCAUTH_KRB5P:
1686 if (!found) {
1687 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1688 found = 1;
1689 }
1690 break;
1691 }
0a7de745 1692 }
6d2010ae
A
1693 }
1694 error = !found ? EAUTH : 0;
1695 }
1696 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
1697 path = NULL;
1698 if (error) {
1699 nfs_socket_search_update_error(&nss, error);
1700 FREE(fh, M_TEMP);
1701 fh = NULL;
1702 nfs_socket_destroy(nso);
1703 goto keepsearching;
1704 }
0a7de745 1705 if (nmp->nm_fh) {
6d2010ae 1706 FREE(nmp->nm_fh, M_TEMP);
0a7de745 1707 }
6d2010ae
A
1708 nmp->nm_fh = fh;
1709 fh = NULL;
1710 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1711 }
1712
1713 /* put the real upcall in place */
1714 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1715 error = sock_setupcall(nso->nso_so, upcall, nmp);
1c79356b 1716 if (error) {
6d2010ae
A
1717 nfs_socket_search_update_error(&nss, error);
1718 nfs_socket_destroy(nso);
1719 goto keepsearching;
1c79356b 1720 }
1c79356b 1721
6d2010ae
A
1722 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1723 /* set mntfromname to this location */
0a7de745 1724 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
6d2010ae 1725 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
0a7de745
A
1726 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1727 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1728 }
6d2010ae 1729 /* some negotiated values need to remain unchanged for the life of the mount */
0a7de745 1730 if (!nmp->nm_sotype) {
6d2010ae 1731 nmp->nm_sotype = nso->nso_sotype;
0a7de745 1732 }
6d2010ae
A
1733 if (!nmp->nm_vers) {
1734 nmp->nm_vers = nfsvers;
cb323159 1735#if CONFIG_NFS4
6d2010ae
A
1736 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1737 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
0a7de745 1738 if (nso->nso_saddr->sa_family == AF_INET) {
6d2010ae 1739 port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
0a7de745 1740 } else if (nso->nso_saddr->sa_family == AF_INET6) {
6d2010ae 1741 port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
0a7de745 1742 } else {
6d2010ae 1743 port = 0;
0a7de745
A
1744 }
1745 if (port == NFS_PORT) {
6d2010ae 1746 nmp->nm_nfsport = NFS_PORT;
0a7de745 1747 }
6d2010ae 1748 }
cb323159 1749#endif
6d2010ae 1750 }
cb323159 1751#if CONFIG_NFS4
6d2010ae
A
1752 /* do some version-specific pre-mount set up */
1753 if (nmp->nm_vers >= NFS_VER4) {
1754 microtime(&now);
1755 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
0a7de745 1756 if (!NMFLAG(nmp, NOCALLBACK)) {
6d2010ae 1757 nfs4_mount_callback_setup(nmp);
0a7de745 1758 }
6d2010ae 1759 }
cb323159 1760#endif
6d2010ae 1761 }
1c79356b 1762
6d2010ae
A
1763 /* Initialize NFS socket state variables */
1764 lck_mtx_lock(&nmp->nm_lock);
1c79356b 1765 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
0a7de745 1766 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1c79356b 1767 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
0a7de745 1768 nmp->nm_sdrtt[3] = 0;
6d2010ae 1769 if (nso->nso_sotype == SOCK_DGRAM) {
0a7de745 1770 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
2d21ac55 1771 nmp->nm_sent = 0;
6d2010ae 1772 } else if (nso->nso_sotype == SOCK_STREAM) {
2d21ac55
A
1773 nmp->nm_timeouts = 0;
1774 }
1775 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1776 nmp->nm_sockflags |= NMSOCK_SETUP;
6d2010ae
A
1777 /* move the socket to the mount structure */
1778 nmp->nm_nso = nso;
1779 oldsaddr = nmp->nm_saddr;
1780 nmp->nm_saddr = nso->nso_saddr;
2d21ac55
A
1781 lck_mtx_unlock(&nmp->nm_lock);
1782 error = nfs_connect_setup(nmp);
2d21ac55 1783 lck_mtx_lock(&nmp->nm_lock);
6d2010ae 1784 nmp->nm_sockflags &= ~NMSOCK_SETUP;
2d21ac55
A
1785 if (!error) {
1786 nmp->nm_sockflags |= NMSOCK_READY;
1787 wakeup(&nmp->nm_sockflags);
1788 }
6d2010ae 1789 if (error) {
39236c6e 1790 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
0a7de745 1791 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
6d2010ae
A
1792 nfs_socket_search_update_error(&nss, error);
1793 nmp->nm_saddr = oldsaddr;
1794 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1795 /* undo settings made prior to setup */
0a7de745 1796 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
6d2010ae 1797 nmp->nm_sotype = 0;
0a7de745 1798 }
6d2010ae 1799 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_VERSION)) {
cb323159 1800#if CONFIG_NFS4
6d2010ae 1801 if (nmp->nm_vers >= NFS_VER4) {
0a7de745 1802 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
6d2010ae 1803 nmp->nm_nfsport = 0;
0a7de745
A
1804 }
1805 if (nmp->nm_cbid) {
6d2010ae 1806 nfs4_mount_callback_shutdown(nmp);
0a7de745
A
1807 }
1808 if (IS_VALID_CRED(nmp->nm_mcred)) {
6d2010ae 1809 kauth_cred_unref(&nmp->nm_mcred);
0a7de745 1810 }
6d2010ae
A
1811 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1812 }
cb323159 1813#endif
6d2010ae
A
1814 nmp->nm_vers = 0;
1815 }
1816 }
1817 lck_mtx_unlock(&nmp->nm_lock);
1818 nmp->nm_nso = NULL;
1819 nfs_socket_destroy(nso);
1820 goto keepsearching;
1821 }
1822
1823 /* update current location */
1824 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1825 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1826 /* server has changed, we should initiate failover/recovery */
1827 // XXX
1828 }
1829 nmp->nm_locations.nl_current = nso->nso_location;
1830 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1831
1832 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1833 /* We have now successfully connected... make a note of it. */
1834 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1835 }
1836
2d21ac55 1837 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 1838 if (oldsaddr) {
6d2010ae 1839 FREE(oldsaddr, M_SONAME);
0a7de745 1840 }
6d2010ae 1841
0a7de745 1842 if (nss.nss_flags & NSS_WARNED) {
6d2010ae 1843 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
0a7de745
A
1844 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1845 }
6d2010ae
A
1846
1847 nmp->nm_nss = NULL;
1848 nfs_socket_search_cleanup(&nss);
0a7de745 1849 if (fh) {
6d2010ae 1850 FREE(fh, M_TEMP);
0a7de745
A
1851 }
1852 if (path) {
6d2010ae 1853 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
0a7de745 1854 }
39236c6e 1855 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
0a7de745 1856 return 0;
2d21ac55
A
1857}
1858
6d2010ae 1859
2d21ac55 1860/* setup & confirm socket connection is functional */
b0d623f7 1861int
cb323159
A
1862nfs_connect_setup(
1863#if !CONFIG_NFS4
1864 __unused
1865#endif
1866 struct nfsmount *nmp)
2d21ac55 1867{
6d2010ae 1868 int error = 0;
cb323159 1869#if CONFIG_NFS4
2d21ac55 1870 if (nmp->nm_vers >= NFS_VER4) {
6d2010ae
A
1871 if (nmp->nm_state & NFSSTA_CLIENTID) {
1872 /* first, try to renew our current state */
1873 error = nfs4_renew(nmp, R_SETUP);
1874 if ((error == NFSERR_ADMIN_REVOKED) ||
1875 (error == NFSERR_CB_PATH_DOWN) ||
1876 (error == NFSERR_EXPIRED) ||
1877 (error == NFSERR_LEASE_MOVED) ||
1878 (error == NFSERR_STALE_CLIENTID)) {
1879 lck_mtx_lock(&nmp->nm_lock);
1880 nfs_need_recover(nmp, error);
1881 lck_mtx_unlock(&nmp->nm_lock);
1882 }
b0d623f7 1883 }
6d2010ae 1884 error = nfs4_setclientid(nmp);
2d21ac55 1885 }
cb323159 1886#endif
0a7de745 1887 return error;
1c79356b
A
1888}
1889
1890/*
2d21ac55
A
1891 * NFS socket reconnect routine:
1892 * Called when a connection is broken.
1893 * - disconnect the old socket
1c79356b
A
1894 * - nfs_connect() again
1895 * - set R_MUSTRESEND for all outstanding requests on mount point
1896 * If this fails the mount point is DEAD!
1c79356b 1897 */
int
nfs_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rq;
	struct timeval now;
	thread_t thd = current_thread();
	int error, wentdown = 0, verbose = 1;
	time_t lastmsg;
	int timeo;

	microuptime(&now);
	/* Arrange for the first "can not connect" message to fire after the initial delay. */
	lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);

	nfs_disconnect(nmp);


	lck_mtx_lock(&nmp->nm_lock);
	/* Use a shorter connect timeout for "squishy" mounts (see nfs_is_squishy()). */
	timeo = nfs_is_squishy(nmp) ? 8 : 30;
	lck_mtx_unlock(&nmp->nm_lock);

	/* Keep retrying the connect until it succeeds or we give up on the mount. */
	while ((error = nfs_connect(nmp, verbose, timeo))) {
		/* only be verbose on the first attempt */
		verbose = 0;
		nfs_disconnect(nmp);
		if ((error == EINTR) || (error == ERESTART)) {
			return EINTR;
		}
		if (error == EIO) {
			return EIO;
		}
		microuptime(&now);
		if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
			/* time to complain again: report the mount as timed out */
			lastmsg = now.tv_sec;
			nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
			wentdown = 1;
		}
		lck_mtx_lock(&nmp->nm_lock);
		if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
			/* we're not yet completely mounted and */
			/* we can't reconnect, so we fail */
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("Not mounted returning %d\n", error);
			return error;
		}

		/* If the unresponsive-mount dead timeout has expired, kill the mount. */
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
			return ENXIO;
		}

		/* Bail out if a signal is pending for this thread. */
		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			lck_mtx_unlock(&nmp->nm_lock);
			return error;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		/* Wait a couple of seconds before trying to connect again. */
		tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			return error;
		}
	}

	if (wentdown) {
		/* We previously reported the server down; report it back up. */
		nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
	}

	/*
	 * Loop through outstanding request list and mark all requests
	 * as needing a resend. (Though nfs_need_reconnect() probably
	 * marked them all already.)
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
		if (rq->r_nmp == nmp) {
			lck_mtx_lock(&rq->r_mtx);
			if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
				rq->r_flags |= R_MUSTRESEND;
				rq->r_rtt = -1;
				wakeup(rq);
				/* async requests not currently being sent get requeued for resend */
				if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
					nfs_asyncio_resend(rq);
				}
			}
			lck_mtx_unlock(&rq->r_mtx);
		}
	}
	lck_mtx_unlock(nfs_request_mutex);
	return 0;
}
1986
1987/*
1988 * NFS disconnect. Clean up and unlink.
1989 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct nfs_socket *nso;

	lck_mtx_lock(&nmp->nm_lock);
tryagain:
	if (nmp->nm_nso) {
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
			nmp->nm_state |= NFSSTA_WANTSND;
			msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
			goto tryagain;
		}
		if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
			msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
			goto tryagain;
		}
		/* Mark the socket as going away and detach it from the mount. */
		nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
		nmp->nm_sockflags &= ~NMSOCK_READY;
		nso = nmp->nm_nso;
		nmp->nm_nso = NULL;
		/*
		 * nm_saddr may be the very sockaddr owned by this socket (nfs_connect
		 * assigns nm_saddr = nso_saddr); clear the socket's pointer so the
		 * address isn't torn down with the socket while the mount still uses it.
		 */
		if (nso->nso_saddr == nmp->nm_saddr) {
			nso->nso_saddr = NULL;
		}
		/* Drop the mount lock while destroying the socket. */
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_socket_destroy(nso);
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
		lck_mtx_unlock(&nmp->nm_lock);
	} else {
		lck_mtx_unlock(&nmp->nm_lock);
	}
}
2024
2025/*
2d21ac55 2026 * mark an NFS mount as needing a reconnect/resends.
1c79356b 2027 */
b0d623f7 2028void
2d21ac55 2029nfs_need_reconnect(struct nfsmount *nmp)
1c79356b 2030{
2d21ac55 2031 struct nfsreq *rq;
1c79356b 2032
2d21ac55 2033 lck_mtx_lock(&nmp->nm_lock);
0a7de745 2034 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2d21ac55 2035 lck_mtx_unlock(&nmp->nm_lock);
1c79356b 2036
2d21ac55
A
2037 /*
2038 * Loop through outstanding request list and
2039 * mark all requests as needing a resend.
1c79356b 2040 */
2d21ac55
A
2041 lck_mtx_lock(nfs_request_mutex);
2042 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2043 if (rq->r_nmp == nmp) {
2044 lck_mtx_lock(&rq->r_mtx);
2045 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2046 rq->r_flags |= R_MUSTRESEND;
2047 rq->r_rtt = -1;
2048 wakeup(rq);
0a7de745 2049 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2d21ac55 2050 nfs_asyncio_resend(rq);
0a7de745 2051 }
1c79356b 2052 }
2d21ac55 2053 lck_mtx_unlock(&rq->r_mtx);
55e303ae 2054 }
1c79356b 2055 }
2d21ac55 2056 lck_mtx_unlock(nfs_request_mutex);
1c79356b
A
2057}
2058
6d2010ae 2059
1c79356b 2060/*
2d21ac55 2061 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
1c79356b 2062 */
void
nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
{
	struct nfsmount *nmp = arg;
	struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
	thread_t thd = current_thread();
	struct nfsreq *req;
	struct timeval now;
	int error, dofinish;
	nfsnode_t np;
	int do_reconnect_sleep = 0;

	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * Main service loop: keep running while there is any work pending —
	 * socket not ready, resends queued, monitored nodes to scan, a dead
	 * timeout ticking, recovery needed, or (v4) delegations to return.
	 * nm_lock is held at the top of each iteration.
	 */
	while (!(nmp->nm_sockflags & NMSOCK_READY) ||
	    !TAILQ_EMPTY(&nmp->nm_resendq) ||
	    !LIST_EMPTY(&nmp->nm_monlist) ||
	    nmp->nm_deadto_start ||
	    (nmp->nm_state & NFSSTA_RECOVER) ||
	    ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			break;
		}
		/* do reconnect, if necessary */
		if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			if (nmp->nm_reconnect_start <= 0) {
				/* remember when this reconnect effort started */
				microuptime(&now);
				nmp->nm_reconnect_start = now.tv_sec;
			}
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			/*
			 * XXX We don't want to call reconnect again right away if returned errors
			 * before that may not have blocked. This has caused spamming null procs
			 * from machines in the pass.
			 */
			if (do_reconnect_sleep) {
				tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
			}
			error = nfs_reconnect(nmp);
			if (error) {
				int lvl = 7;
				if (error == EIO || error == EINTR) {
					/* log loudly only once every 600 failures to avoid log spam */
					lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
				}
				NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
			} else {
				nmp->nm_reconnect_start = 0;
				do_reconnect_sleep = 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) &&
		    (nmp->nm_state & NFSSTA_RECOVER) &&
		    !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
		    !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			/* perform state recovery */
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_recover(nmp);
			lck_mtx_lock(&nmp->nm_lock);
		}
#if CONFIG_NFS4
		/* handle NFSv4 delegation returns */
		while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
		    (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
		    ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
			lck_mtx_unlock(&nmp->nm_lock);
			nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
			lck_mtx_lock(&nmp->nm_lock);
		}
#endif
		/* do resends, if necessary/possible */
		while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
		    (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
		    ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
			if (req->r_resendtime) {
				microuptime(&now);
			}
			/* skip over requests whose scheduled resend time hasn't arrived yet */
			while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
				req = TAILQ_NEXT(req, r_rchain);
			}
			if (!req) {
				break;
			}
			/* take the request off the resend queue before working on it */
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_lock(&req->r_mtx);
			/* Note that we have a reference on the request that was taken nfs_asyncio_resend */
			if (req->r_error || req->r_nmrep.nmc_mhead) {
				/* request already errored out or has a reply — no resend needed */
				dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				req->r_flags &= ~R_RESENDQ;
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				continue;
			}
			if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
				req->r_flags &= ~R_RESTART;
				req->r_resendtime = 0;
				lck_mtx_unlock(&req->r_mtx);
				/* async RPCs on GSS mounts need to be rebuilt and resent. */
				nfs_reqdequeue(req);
#if CONFIG_NFS_GSS
				if (nfs_request_using_gss(req)) {
					nfs_gss_clnt_rpcdone(req);
					error = nfs_gss_clnt_args_restore(req);
					if (error == ENEEDAUTH) {
						/* force a fresh xid so credentials get renegotiated */
						req->r_xid = 0;
					}
				}
#endif /* CONFIG_NFS_GSS */
				NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
				    nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
				    req->r_flags, req->r_rtt);
				error = nfs_sigintr(nmp, req, req->r_thread, 0);
				if (!error) {
					error = nfs_request_add_header(req);
				}
				if (!error) {
					error = nfs_request_send(req, 0);
				}
				lck_mtx_lock(&req->r_mtx);
				if (req->r_flags & R_RESENDQ) {
					req->r_flags &= ~R_RESENDQ;
				}
				if (error) {
					req->r_error = error;
				}
				wakeup(req);
				dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				error = 0;
				continue;
			}
			/* plain resend: just push the already-built request back out */
			NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			error = nfs_sigintr(nmp, req, req->r_thread, 0);
			if (!error) {
				req->r_flags |= R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				error = nfs_send(req, 0);
				lck_mtx_lock(&req->r_mtx);
				if (!error) {
					if (req->r_flags & R_RESENDQ) {
						req->r_flags &= ~R_RESENDQ;
					}
					wakeup(req);
					lck_mtx_unlock(&req->r_mtx);
					nfs_request_rele(req);
					lck_mtx_lock(&nmp->nm_lock);
					continue;
				}
			}
			/* send failed (or signal pending): record the error and finish up */
			req->r_error = error;
			if (req->r_flags & R_RESENDQ) {
				req->r_flags &= ~R_RESENDQ;
			}
			wakeup(req);
			dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
			lck_mtx_unlock(&req->r_mtx);
			if (dofinish) {
				nfs_asyncio_finish(req);
			}
			nfs_request_rele(req);
			lck_mtx_lock(&nmp->nm_lock);
		}
		/* If the unresponsive-mount dead timeout has expired, kill the mount. */
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			break;
		}

		if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
			break;
		}
		/* check monitored nodes, if necessary/possible */
		if (!LIST_EMPTY(&nmp->nm_monlist)) {
			nmp->nm_state |= NFSSTA_MONITOR_SCAN;
			LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
				if (!(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
				np->n_mflag |= NMMONSCANINPROG;
				lck_mtx_unlock(&nmp->nm_lock);
				error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
				if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
					nfs_data_update_size(np, 0);
				}
				lck_mtx_lock(&nmp->nm_lock);
				np->n_mflag &= ~NMMONSCANINPROG;
				if (np->n_mflag & NMMONSCANWANT) {
					np->n_mflag &= ~NMMONSCANWANT;
					wakeup(&np->n_mflag);
				}
				if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
			}
			nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
			if (nmp->nm_state & NFSSTA_UNMOUNTING) {
				wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
			}
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
			/* sleep briefly; poll faster while work is pending */
			if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
			    (nmp->nm_state & NFSSTA_RECOVER)) {
				ts.tv_sec = 1;
			} else {
				ts.tv_sec = 5;
			}
			msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
		}
	}

	/* If we're unmounting, send the unmount RPC, if requested/appropriate. */
	if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
	    (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
	    (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		lck_mtx_unlock(&nmp->nm_lock);
		nfs3_umount_rpc(nmp, vfs_context_kernel(),
		    (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
		lck_mtx_lock(&nmp->nm_lock);
	}

	/* thread exit: clear the mount's thread pointer and wake any waiters */
	if (nmp->nm_sockthd == thd) {
		nmp->nm_sockthd = NULL;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	wakeup(&nmp->nm_sockthd);
	thread_terminate(thd);
}
2305
2306/* start or wake a mount's socket thread */
2307void
2308nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2309{
0a7de745 2310 if (nmp->nm_sockthd) {
2d21ac55 2311 wakeup(&nmp->nm_sockthd);
0a7de745 2312 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2d21ac55 2313 thread_deallocate(nmp->nm_sockthd);
0a7de745 2314 }
1c79356b
A
2315}
2316
b0d623f7
A
2317/*
2318 * Check if we should mark the mount dead because the
2319 * unresponsive mount has reached the dead timeout.
2320 * (must be called with nmp locked)
2321 */
fe8ab488 2322int
b0d623f7
A
2323nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2324{
2325 struct timeval now;
2326
0a7de745 2327 if (nmp->nm_state & NFSSTA_DEAD) {
fe8ab488 2328 return 1;
0a7de745
A
2329 }
2330 if (nmp->nm_deadto_start == 0) {
fe8ab488 2331 return 0;
0a7de745 2332 }
316670eb 2333 nfs_is_squishy(nmp);
0a7de745 2334 if (nmp->nm_curdeadtimeout <= 0) {
fe8ab488 2335 return 0;
0a7de745 2336 }
b0d623f7 2337 microuptime(&now);
0a7de745 2338 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
fe8ab488 2339 return 0;
0a7de745 2340 }
fe8ab488
A
2341 return 1;
2342}
2343
2344/*
2345 * Call nfs_mount_zombie to remove most of the
2346 * nfs state for the mount, and then ask to be forcibly unmounted.
2347 *
2348 * Assumes the nfs mount structure lock nm_lock is held.
2349 */
2350
2351void
2352nfs_mount_make_zombie(struct nfsmount *nmp)
2353{
2354 fsid_t fsid;
0a7de745
A
2355
2356 if (!nmp) {
fe8ab488 2357 return;
0a7de745 2358 }
fe8ab488 2359
0a7de745 2360 if (nmp->nm_state & NFSSTA_DEAD) {
b0d623f7 2361 return;
0a7de745 2362 }
fe8ab488 2363
316670eb 2364 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
0a7de745 2365 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
fe8ab488
A
2366 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2367 lck_mtx_unlock(&nmp->nm_lock);
2368 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2369 vfs_event_signal(&fsid, VQ_DEAD, 0);
2370 lck_mtx_lock(&nmp->nm_lock);
b0d623f7
A
2371}
2372
fe8ab488 2373
b0d623f7
A
/*
 * NFS callback channel socket state
 *
 * One of these tracks each callback-channel socket connection
 * accepted from a server.
 */
struct nfs_callback_socket {
	TAILQ_ENTRY(nfs_callback_socket) ncbs_link;     /* list linkage (nfs4_cb_socks) */
	socket_t ncbs_so;                               /* the socket */
	struct sockaddr_storage ncbs_saddr;             /* socket address */
	struct nfs_rpc_record_state ncbs_rrs;           /* RPC record parsing state */
	time_t ncbs_stamp;                              /* last accessed at */
	uint32_t ncbs_flags;                            /* see below */
};
/* ncbs_flags — NOTE(review): flag semantics inferred from names; confirm against the callback upcall code */
#define NCBSOCK_UPCALL          0x0001  /* presumably: socket upcall in progress */
#define NCBSOCK_UPCALLWANT      0x0002  /* presumably: a thread is waiting on the upcall */
#define NCBSOCK_DEAD            0x0004  /* presumably: socket is no longer usable */
b0d623f7 2388
cb323159 2389#if CONFIG_NFS4
b0d623f7
A
2390/*
2391 * NFS callback channel state
2392 *
2393 * One listening socket for accepting socket connections from servers and
2394 * a list of connected sockets to handle callback requests on.
2395 * Mounts registered with the callback channel are assigned IDs and
2396 * put on a list so that the callback request handling code can match
2397 * the requests up with mounts.
2398 */
socket_t nfs4_cb_so = NULL;             /* IPv4 listening socket for callback connections */
socket_t nfs4_cb_so6 = NULL;            /* IPv6 listening socket for callback connections */
in_port_t nfs4_cb_port = 0;             /* port the IPv4 listener is bound to */
in_port_t nfs4_cb_port6 = 0;            /* port the IPv6 listener is bound to */
uint32_t nfs4_cb_id = 0;                /* counter for assigning callback IDs to mounts */
uint32_t nfs4_cb_so_usecount = 0;       /* number of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;       /* connected callback sockets */
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;                /* mounts registered with the callback channel */

/* handle one callback RPC request received on a callback socket */
int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2409
2410/*
2411 * Set up the callback channel for the NFS mount.
2412 *
2413 * Initializes the callback channel socket state and
2414 * assigns a callback ID to the mount.
2415 */
2416void
2417nfs4_mount_callback_setup(struct nfsmount *nmp)
2418{
2419 struct sockaddr_in sin;
6d2010ae 2420 struct sockaddr_in6 sin6;
b0d623f7 2421 socket_t so = NULL;
6d2010ae 2422 socket_t so6 = NULL;
b0d623f7
A
2423 struct timeval timeo;
2424 int error, on = 1;
6d2010ae 2425 in_port_t port;
b0d623f7
A
2426
2427 lck_mtx_lock(nfs_global_mutex);
2428 if (nfs4_cb_id == 0) {
2429 TAILQ_INIT(&nfs4_cb_mounts);
2430 TAILQ_INIT(&nfs4_cb_socks);
2431 nfs4_cb_id++;
2432 }
2433 nmp->nm_cbid = nfs4_cb_id++;
0a7de745 2434 if (nmp->nm_cbid == 0) {
b0d623f7 2435 nmp->nm_cbid = nfs4_cb_id++;
0a7de745 2436 }
b0d623f7
A
2437 nfs4_cb_so_usecount++;
2438 TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
2439
2440 if (nfs4_cb_so) {
2441 lck_mtx_unlock(nfs_global_mutex);
2442 return;
2443 }
2444
6d2010ae 2445 /* IPv4 */
b0d623f7
A
2446 error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
2447 if (error) {
6d2010ae 2448 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
b0d623f7
A
2449 goto fail;
2450 }
2451 so = nfs4_cb_so;
2452
6d2010ae 2453 sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
b0d623f7
A
2454 sin.sin_len = sizeof(struct sockaddr_in);
2455 sin.sin_family = AF_INET;
2456 sin.sin_addr.s_addr = htonl(INADDR_ANY);
6d2010ae 2457 sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
b0d623f7
A
2458 error = sock_bind(so, (struct sockaddr *)&sin);
2459 if (error) {
6d2010ae 2460 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
b0d623f7
A
2461 goto fail;
2462 }
2463 error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
2464 if (error) {
6d2010ae 2465 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
b0d623f7
A
2466 goto fail;
2467 }
2468 nfs4_cb_port = ntohs(sin.sin_port);
2469
2470 error = sock_listen(so, 32);
2471 if (error) {
6d2010ae 2472 log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
b0d623f7
A
2473 goto fail;
2474 }
2475
2476 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2477 timeo.tv_usec = 0;
2478 timeo.tv_sec = 60;
2479 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
0a7de745 2480 if (error) {
6d2010ae 2481 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
0a7de745 2482 }
b0d623f7 2483 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
0a7de745 2484 if (error) {
6d2010ae 2485 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
0a7de745 2486 }
b0d623f7
A
2487 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2488 sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2489 sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2490 error = 0;
2491
6d2010ae
A
2492 /* IPv6 */
2493 error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
2494 if (error) {
2495 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
2496 goto fail;
2497 }
2498 so6 = nfs4_cb_so6;
2499
2500 sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2501 sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
2502 /* try to use specified port or same port as IPv4 */
2503 port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
2504ipv6_bind_again:
2505 sin6.sin6_len = sizeof(struct sockaddr_in6);
2506 sin6.sin6_family = AF_INET6;
2507 sin6.sin6_addr = in6addr_any;
2508 sin6.sin6_port = htons(port);
2509 error = sock_bind(so6, (struct sockaddr *)&sin6);
2510 if (error) {
2511 if (port != nfs_callback_port) {
2512 /* if we simply tried to match the IPv4 port, then try any port */
2513 port = 0;
2514 goto ipv6_bind_again;
2515 }
2516 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
2517 goto fail;
2518 }
2519 error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
2520 if (error) {
2521 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
2522 goto fail;
2523 }
2524 nfs4_cb_port6 = ntohs(sin6.sin6_port);
2525
2526 error = sock_listen(so6, 32);
2527 if (error) {
2528 log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
2529 goto fail;
2530 }
2531
2532 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2533 timeo.tv_usec = 0;
2534 timeo.tv_sec = 60;
2535 error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
0a7de745 2536 if (error) {
6d2010ae 2537 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
0a7de745 2538 }
6d2010ae 2539 error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
0a7de745 2540 if (error) {
6d2010ae 2541 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
0a7de745 2542 }
6d2010ae
A
2543 sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2544 sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2545 sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2546 error = 0;
2547
b0d623f7
A
2548fail:
2549 if (error) {
6d2010ae 2550 nfs4_cb_so = nfs4_cb_so6 = NULL;
b0d623f7
A
2551 lck_mtx_unlock(nfs_global_mutex);
2552 if (so) {
2553 sock_shutdown(so, SHUT_RDWR);
2554 sock_close(so);
2555 }
6d2010ae
A
2556 if (so6) {
2557 sock_shutdown(so6, SHUT_RDWR);
2558 sock_close(so6);
2559 }
b0d623f7
A
2560 } else {
2561 lck_mtx_unlock(nfs_global_mutex);
2562 }
2563}
2564
/*
 * Shut down the callback channel for the NFS mount.
 *
 * Clears the mount's callback ID and releases the mount's
 * reference on the callback socket. Last reference dropped
 * will also shut down the callback socket(s).
 */
void
nfs4_mount_callback_shutdown(struct nfsmount *nmp)
{
	struct nfs_callback_socket *ncbsp;
	socket_t so, so6;
	struct nfs4_cb_sock_list cb_socks;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	lck_mtx_lock(nfs_global_mutex);
	TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
	/* wait for any callbacks in progress to complete */
	/* (nfs4_cb_handler wakes this channel when it drops the last nm_cbrefs) */
	while (nmp->nm_cbrefs) {
		msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
	}
	nmp->nm_cbid = 0;
	/* if other mounts still reference the callback sockets, leave them up */
	if (--nfs4_cb_so_usecount) {
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	/*
	 * Last user: take ownership of the listening sockets and of every
	 * accepted callback socket under the lock, then drop the lock so
	 * the (potentially blocking) socket teardown happens unlocked.
	 */
	so = nfs4_cb_so;
	so6 = nfs4_cb_so6;
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	TAILQ_INIT(&cb_socks);
	TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
	lck_mtx_unlock(nfs_global_mutex);
	if (so) {
		sock_shutdown(so, SHUT_RDWR);
		sock_close(so);
	}
	if (so6) {
		sock_shutdown(so6, SHUT_RDWR);
		sock_close(so6);
	}
	/* tear down each accepted callback connection and free its state */
	while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
		TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
	}
}
2613
/*
 * Check periodically for stale/unused nfs callback sockets
 */
#define NFS4_CB_TIMER_PERIOD    30      /* seconds between cleanup scans */
#define NFS4_CB_IDLE_MAX        300     /* reap sockets idle longer than this (seconds) */
void
nfs4_callback_timer(__unused void *param0, __unused void *param1)
{
	struct nfs_callback_socket *ncbsp, *nextncbsp;
	struct timeval now;

loop:
	lck_mtx_lock(nfs_global_mutex);
	/* no callback sockets left: let the timer lapse */
	if (TAILQ_EMPTY(&nfs4_cb_socks)) {
		nfs4_callback_timer_on = 0;
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	microuptime(&now);
	TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
		/* keep sockets that are alive and recently active */
		if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
		    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
			continue;
		}
		/*
		 * Reap this socket.  The close can block, so drop the lock
		 * first, then restart the whole scan from the top since the
		 * list may have changed while unlocked.
		 */
		TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
		lck_mtx_unlock(nfs_global_mutex);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
		goto loop;
	}
	/* sockets remain: rearm the timer for the next scan */
	nfs4_callback_timer_on = 1;
	nfs_interval_timer_start(nfs4_callback_timer_call,
	    NFS4_CB_TIMER_PERIOD * 1000);
	lck_mtx_unlock(nfs_global_mutex);
}
2651
/*
 * Accept a new callback socket.
 *
 * Upcall invoked when a connection arrives on one of the two
 * listening callback sockets (IPv4 or IPv6).  Accepts the
 * connection, configures it, records it on nfs4_cb_socks, and
 * makes sure the cleanup timer is running.
 */
void
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
{
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* only handle upcalls from one of our own listening sockets */
	if (so == nfs4_cb_so) {
		ip = 4;
	} else if (so == nfs4_cb_so6) {
		ip = 6;
	} else {
		return;
	}

	/* allocate/initialize a new nfs_callback_socket */
	MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
	if (!ncbsp) {
		log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
		return;
	}
	bzero(ncbsp, sizeof(*ncbsp));
	/* size the peer-address buffer for the family we're accepting on */
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket; nfs4_cb_rcv becomes its receive upcall */
	error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
	    ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
	    nfs4_cb_rcv, ncbsp, &newso);
	if (error) {
		log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
		FREE(ncbsp, M_TEMP);
		return;
	}

	/* set up the new socket */
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	}
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	}
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	microuptime(&now);
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr) {
			continue;
		}
		if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
			break;
		}
	}
	if (!nmp) { /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	}

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
		    !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		/* timer already armed for a long period; reschedule it sooner */
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	}

	lck_mtx_unlock(nfs_global_mutex);
}
2746
/*
 * Receive mbufs from callback sockets into RPC records and process each record.
 * Detect connection has been closed and shut down.
 *
 * Socket receive upcall for an accepted callback connection (arg is
 * the connection's nfs_callback_socket).  Only one upcall runs per
 * socket at a time, serialized via the NCBSOCK_UPCALL flag.
 */
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	lck_mtx_lock(nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);
		}
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* still healthy: refresh the idle timestamp used by the reaper */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	lck_mtx_lock(nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);
	/* wake any other upcall waiting on NCBSOCK_UPCALLWANT */
	wakeup(ncbsp);
}
2796
2797/*
2798 * Handle an NFS callback channel request.
2799 */
2800int
2801nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2802{
2803 socket_t so = ncbsp->ncbs_so;
2804 struct nfsm_chain nmreq, nmrep;
2805 mbuf_t mhead = NULL, mrest = NULL, m;
b0d623f7
A
2806 struct msghdr msg;
2807 struct nfsmount *nmp;
2808 fhandle_t fh;
2809 nfsnode_t np;
2810 nfs_stateid stateid;
2811 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2812 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2813 uint32_t auth_type, auth_len;
2814 uint32_t numres, *pnumres;
2815 int error = 0, replen, len;
2816 size_t sentlen = 0;
2817
2818 xid = numops = op = status = procnum = taglen = cbid = 0;
2819
2820 nfsm_chain_dissect_init(error, &nmreq, mreq);
0a7de745
A
2821 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2822 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
b0d623f7 2823 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
0a7de745 2824 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
b0d623f7 2825 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
0a7de745 2826 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
b0d623f7 2827 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
0a7de745 2828 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
b0d623f7 2829 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
0a7de745 2830 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
b0d623f7
A
2831 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2832
2833 /* Handle authentication */
2834 /* XXX just ignore auth for now - handling kerberos may be tricky */
0a7de745
A
2835 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2836 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
b0d623f7 2837 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
0a7de745 2838 if (!error && (auth_len > 0)) {
b0d623f7 2839 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
0a7de745
A
2840 }
2841 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2842 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
b0d623f7 2843 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
0a7de745 2844 if (!error && (auth_len > 0)) {
b0d623f7 2845 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
0a7de745 2846 }
b0d623f7
A
2847 if (error) {
2848 status = error;
2849 error = 0;
2850 goto nfsmout;
2851 }
2852
2853 switch (procnum) {
2854 case NFSPROC4_CB_NULL:
2855 status = NFSERR_RETVOID;
2856 break;
2857 case NFSPROC4_CB_COMPOUND:
2858 /* tag, minorversion, cb ident, numops, op array */
0a7de745 2859 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
b0d623f7
A
2860 nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);
2861
2862 /* start building the body of the response */
0a7de745 2863 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
b0d623f7
A
2864 nfsm_chain_init(&nmrep, mrest);
2865
2866 /* copy tag from request to response */
0a7de745 2867 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
b0d623f7
A
2868 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2869 nfsm_chain_get_32(error, &nmreq, val);
2870 nfsm_chain_add_32(error, &nmrep, val);
2871 }
2872
2873 /* insert number of results placeholder */
2874 numres = 0;
2875 nfsm_chain_add_32(error, &nmrep, numres);
2876 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2877
0a7de745 2878 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
b0d623f7 2879 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
0a7de745
A
2880 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2881 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
b0d623f7 2882 if (error) {
0a7de745 2883 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
b0d623f7 2884 status = error;
0a7de745 2885 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
b0d623f7 2886 status = NFSERR_RESOURCE;
0a7de745 2887 } else {
b0d623f7 2888 status = NFSERR_SERVERFAULT;
0a7de745 2889 }
b0d623f7
A
2890 error = 0;
2891 nfsm_chain_null(&nmrep);
2892 goto nfsmout;
2893 }
2894 /* match the callback ID to a registered mount */
2895 lck_mtx_lock(nfs_global_mutex);
2896 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
0a7de745 2897 if (nmp->nm_cbid != cbid) {
b0d623f7 2898 continue;
0a7de745 2899 }
b0d623f7 2900 /* verify socket's source address matches this mount's server address */
0a7de745 2901 if (!nmp->nm_saddr) {
b0d623f7 2902 continue;
0a7de745
A
2903 }
2904 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
6d2010ae 2905 break;
0a7de745 2906 }
b0d623f7
A
2907 }
2908 /* mark the NFS mount as busy */
0a7de745 2909 if (nmp) {
b0d623f7 2910 nmp->nm_cbrefs++;
0a7de745 2911 }
b0d623f7
A
2912 lck_mtx_unlock(nfs_global_mutex);
2913 if (!nmp) {
2914 /* if no mount match, just drop socket. */
2915 error = EPERM;
2916 nfsm_chain_null(&nmrep);
2917 goto out;
2918 }
2919
2920 /* process ops, adding results to mrest */
2921 while (numops > 0) {
2922 numops--;
2923 nfsm_chain_get_32(error, &nmreq, op);
0a7de745 2924 if (error) {
b0d623f7 2925 break;
0a7de745 2926 }
b0d623f7
A
2927 switch (op) {
2928 case NFS_OP_CB_GETATTR:
2929 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2930 np = NULL;
2931 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
2932 bmlen = NFS_ATTR_BITMAP_LEN;
2933 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2934 if (error) {
2935 status = error;
2936 error = 0;
2937 numops = 0; /* don't process any more ops */
2938 } else {
2939 /* find the node for the file handle */
6d2010ae 2940 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
b0d623f7
A
2941 if (error || !np) {
2942 status = NFSERR_BADHANDLE;
2943 error = 0;
2944 np = NULL;
2945 numops = 0; /* don't process any more ops */
2946 }
2947 }
2948 nfsm_chain_add_32(error, &nmrep, op);
2949 nfsm_chain_add_32(error, &nmrep, status);
0a7de745 2950 if (!error && (status == EBADRPC)) {
b0d623f7 2951 error = status;
0a7de745 2952 }
b0d623f7
A
2953 if (np) {
2954 /* only allow returning size, change, and mtime attrs */
2955 NFS_CLEAR_ATTRIBUTES(&rbitmap);
2956 attrbytes = 0;
2957 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2958 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
2959 attrbytes += 2 * NFSX_UNSIGNED;
2960 }
2961 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2962 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
2963 attrbytes += 2 * NFSX_UNSIGNED;
2964 }
2965 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2966 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
2967 attrbytes += 3 * NFSX_UNSIGNED;
2968 }
2969 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
2970 nfsm_chain_add_32(error, &nmrep, attrbytes);
0a7de745 2971 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
b0d623f7 2972 nfsm_chain_add_64(error, &nmrep,
0a7de745
A
2973 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
2974 }
2975 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
b0d623f7 2976 nfsm_chain_add_64(error, &nmrep, np->n_size);
0a7de745 2977 }
b0d623f7
A
2978 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2979 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
2980 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
2981 }
2982 nfs_node_unlock(np);
2983 vnode_put(NFSTOV(np));
2984 np = NULL;
2985 }
2986 /*
2987 * If we hit an error building the reply, we can't easily back up.
2988 * So we'll just update the status and hope the server ignores the
2989 * extra garbage.
2990 */
2991 break;
2992 case NFS_OP_CB_RECALL:
2993 // (STATEID, TRUNCATE, FH) -> (STATUS)
2994 np = NULL;
2995 nfsm_chain_get_stateid(error, &nmreq, &stateid);
2996 nfsm_chain_get_32(error, &nmreq, truncate);
2997 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
2998 if (error) {
2999 status = error;
3000 error = 0;
3001 numops = 0; /* don't process any more ops */
3002 } else {
3003 /* find the node for the file handle */
6d2010ae 3004 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
b0d623f7
A
3005 if (error || !np) {
3006 status = NFSERR_BADHANDLE;
3007 error = 0;
3008 np = NULL;
3009 numops = 0; /* don't process any more ops */
3010 } else if (!(np->n_openflags & N_DELEG_MASK) ||
0a7de745 3011 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
b0d623f7
A
3012 /* delegation stateid state doesn't match */
3013 status = NFSERR_BAD_STATEID;
3014 numops = 0; /* don't process any more ops */
3015 }
0a7de745 3016 if (!status) { /* add node to recall queue, and wake socket thread */
6d2010ae 3017 nfs4_delegation_return_enqueue(np);
0a7de745 3018 }
b0d623f7
A
3019 if (np) {
3020 nfs_node_unlock(np);
3021 vnode_put(NFSTOV(np));
3022 }
3023 }
3024 nfsm_chain_add_32(error, &nmrep, op);
3025 nfsm_chain_add_32(error, &nmrep, status);
0a7de745 3026 if (!error && (status == EBADRPC)) {
b0d623f7 3027 error = status;
0a7de745 3028 }
b0d623f7
A
3029 break;
3030 case NFS_OP_CB_ILLEGAL:
3031 default:
3032 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3033 status = NFSERR_OP_ILLEGAL;
3034 nfsm_chain_add_32(error, &nmrep, status);
3035 numops = 0; /* don't process any more ops */
3036 break;
3037 }
3038 numres++;
3039 }
3040
3041 if (!status && error) {
0a7de745 3042 if (error == EBADRPC) {
b0d623f7 3043 status = error;
0a7de745 3044 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
b0d623f7 3045 status = NFSERR_RESOURCE;
0a7de745 3046 } else {
b0d623f7 3047 status = NFSERR_SERVERFAULT;
0a7de745 3048 }
b0d623f7
A
3049 error = 0;
3050 }
3051
3052 /* Now, set the numres field */
3053 *pnumres = txdr_unsigned(numres);
3054 nfsm_chain_build_done(error, &nmrep);
3055 nfsm_chain_null(&nmrep);
3056
3057 /* drop the callback reference on the mount */
3058 lck_mtx_lock(nfs_global_mutex);
3059 nmp->nm_cbrefs--;
0a7de745 3060 if (!nmp->nm_cbid) {
b0d623f7 3061 wakeup(&nmp->nm_cbrefs);
0a7de745 3062 }
b0d623f7
A
3063 lck_mtx_unlock(nfs_global_mutex);
3064 break;
3065 }
3066
3067nfsmout:
0a7de745 3068 if (status == EBADRPC) {
316670eb 3069 OSAddAtomic64(1, &nfsstats.rpcinvalid);
0a7de745 3070 }
b0d623f7
A
3071
3072 /* build reply header */
3073 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3074 nfsm_chain_init(&nmrep, mhead);
3075 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3076 nfsm_chain_add_32(error, &nmrep, xid);
3077 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3078 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3079 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3080 if (status & NFSERR_AUTHERR) {
3081 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3082 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3083 } else {
3084 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3085 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3086 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3087 }
3088 } else {
3089 /* reply status */
3090 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3091 /* XXX RPCAUTH_NULL verifier */
3092 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3093 nfsm_chain_add_32(error, &nmrep, 0);
3094 /* accepted status */
3095 switch (status) {
3096 case EPROGUNAVAIL:
3097 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3098 break;
3099 case EPROGMISMATCH:
3100 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3101 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3102 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3103 break;
3104 case EPROCUNAVAIL:
3105 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3106 break;
3107 case EBADRPC:
3108 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3109 break;
3110 default:
3111 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
0a7de745 3112 if (status != NFSERR_RETVOID) {
b0d623f7 3113 nfsm_chain_add_32(error, &nmrep, status);
0a7de745 3114 }
b0d623f7
A
3115 break;
3116 }
3117 }
3118 nfsm_chain_build_done(error, &nmrep);
3119 if (error) {
3120 nfsm_chain_null(&nmrep);
3121 goto out;
3122 }
3123 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3124 if (error) {
3125 printf("nfs cb: mbuf_setnext failed %d\n", error);
3126 goto out;
3127 }
3128 mrest = NULL;
3129 /* Calculate the size of the reply */
3130 replen = 0;
0a7de745 3131 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
b0d623f7 3132 replen += mbuf_len(m);
0a7de745 3133 }
b0d623f7
A
3134 mbuf_pkthdr_setlen(mhead, replen);
3135 error = mbuf_pkthdr_setrcvif(mhead, NULL);
3136 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3137 nfsm_chain_null(&nmrep);
3138
3139 /* send the reply */
3140 bzero(&msg, sizeof(msg));
3141 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3142 mhead = NULL;
0a7de745 3143 if (!error && ((int)sentlen != replen)) {
b0d623f7 3144 error = EWOULDBLOCK;
0a7de745
A
3145 }
3146 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
b0d623f7 3147 error = ETIMEDOUT;
0a7de745 3148 }
b0d623f7 3149out:
0a7de745 3150 if (error) {
b0d623f7 3151 nfsm_chain_cleanup(&nmrep);
0a7de745
A
3152 }
3153 if (mhead) {
b0d623f7 3154 mbuf_freem(mhead);
0a7de745
A
3155 }
3156 if (mrest) {
b0d623f7 3157 mbuf_freem(mrest);
0a7de745
A
3158 }
3159 if (mreq) {
b0d623f7 3160 mbuf_freem(mreq);
0a7de745
A
3161 }
3162 return error;
b0d623f7 3163}
cb323159 3164#endif /* CONFIG_NFS4 */
b0d623f7 3165
6d2010ae
A
3166/*
3167 * Initialize an nfs_rpc_record_state structure.
3168 */
3169void
3170nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3171{
3172 bzero(nrrsp, sizeof(*nrrsp));
3173 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3174}
3175
3176/*
3177 * Clean up an nfs_rpc_record_state structure.
3178 */
3179void
3180nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3181{
3182 if (nrrsp->nrrs_m) {
3183 mbuf_freem(nrrsp->nrrs_m);
3184 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3185 }
3186}
3187
b0d623f7
A
/*
 * Read the next (marked) RPC record from the socket.
 *
 * *recvp returns if any data was received.
 * *mp returns the next complete RPC record
 *
 * State between partial reads (marker bytes still expected, fragment
 * bytes still expected, mbufs accumulated so far) is carried in *nrrsp,
 * so this may be called repeatedly as data trickles in.
 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* read into nrrs_fragleft at the offset already consumed */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			continue;
		}
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		/* high bit of the marker flags the final fragment of the record */
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* track the tail of the chain for the next append */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3280
3281
3282
1c79356b 3283/*
2d21ac55
A
3284 * The NFS client send routine.
3285 *
3286 * Send the given NFS request out the mount's socket.
3287 * Holds nfs_sndlock() for the duration of this call.
3288 *
3289 * - check for request termination (sigintr)
b0d623f7 3290 * - wait for reconnect, if necessary
2d21ac55
A
3291 * - UDP: check the congestion window
3292 * - make a copy of the request to send
3293 * - UDP: update the congestion window
3294 * - send the request
3295 *
3296 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3297 * rexmit count is also updated if this isn't the first send.
3298 *
3299 * If the send is not successful, make sure R_MUSTRESEND is set.
3300 * If this wasn't the first transmit, set R_RESENDERR.
3301 * Also, undo any UDP congestion window changes made.
3302 *
3303 * If the error appears to indicate that the socket should
3304 * be reconnected, mark the socket for reconnection.
3305 *
3306 * Only return errors when the request should be aborted.
1c79356b 3307 */
1c79356b 3308int
2d21ac55 3309nfs_send(struct nfsreq *req, int wait)
1c79356b 3310{
2d21ac55 3311 struct nfsmount *nmp;
6d2010ae 3312 struct nfs_socket *nso;
36401178 3313 int error, error2, sotype, rexmit, slpflag = 0, needrecon;
2d21ac55
A
3314 struct msghdr msg;
3315 struct sockaddr *sendnam;
3316 mbuf_t mreqcopy;
3317 size_t sentlen = 0;
cb323159 3318 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
1c79356b 3319
2d21ac55
A
3320again:
3321 error = nfs_sndlock(req);
b0d623f7
A
3322 if (error) {
3323 lck_mtx_lock(&req->r_mtx);
3324 req->r_error = error;
3325 req->r_flags &= ~R_SENDING;
3326 lck_mtx_unlock(&req->r_mtx);
0a7de745 3327 return error;
b0d623f7 3328 }
2d21ac55 3329
6d2010ae 3330 error = nfs_sigintr(req->r_nmp, req, NULL, 0);
2d21ac55
A
3331 if (error) {
3332 nfs_sndunlock(req);
b0d623f7
A
3333 lck_mtx_lock(&req->r_mtx);
3334 req->r_error = error;
3335 req->r_flags &= ~R_SENDING;
3336 lck_mtx_unlock(&req->r_mtx);
0a7de745 3337 return error;
2d21ac55
A
3338 }
3339 nmp = req->r_nmp;
3340 sotype = nmp->nm_sotype;
3341
b0d623f7
A
3342 /*
3343 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3344 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3345 */
3346 if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
3347 ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
3348 error = ETIMEDOUT;
2d21ac55 3349 nfs_sndunlock(req);
b0d623f7
A
3350 lck_mtx_lock(&req->r_mtx);
3351 req->r_error = error;
3352 req->r_flags &= ~R_SENDING;
3353 lck_mtx_unlock(&req->r_mtx);
0a7de745 3354 return error;
2d21ac55
A
3355 }
3356
3357 /* If the socket needs reconnection, do that now. */
3358 /* wait until socket is ready - unless this request is part of setup */
3359 lck_mtx_lock(&nmp->nm_lock);
3360 if (!(nmp->nm_sockflags & NMSOCK_READY) &&
3361 !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
0a7de745 3362 if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
2d21ac55 3363 slpflag |= PCATCH;
0a7de745 3364 }
2d21ac55
A
3365 lck_mtx_unlock(&nmp->nm_lock);
3366 nfs_sndunlock(req);
3367 if (!wait) {
3368 lck_mtx_lock(&req->r_mtx);
b0d623f7 3369 req->r_flags &= ~R_SENDING;
2d21ac55
A
3370 req->r_flags |= R_MUSTRESEND;
3371 req->r_rtt = 0;
3372 lck_mtx_unlock(&req->r_mtx);
0a7de745 3373 return 0;
2d21ac55 3374 }
39236c6e 3375 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
2d21ac55
A
3376 lck_mtx_lock(&req->r_mtx);
3377 req->r_flags &= ~R_MUSTRESEND;
3378 req->r_rtt = 0;
3379 lck_mtx_unlock(&req->r_mtx);
3380 lck_mtx_lock(&nmp->nm_lock);
3381 while (!(nmp->nm_sockflags & NMSOCK_READY)) {
3382 /* don't bother waiting if the socket thread won't be reconnecting it */
0a7de745 3383 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
2d21ac55
A
3384 error = EIO;
3385 break;
3386 }
fe8ab488 3387 if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
b0d623f7
A
3388 struct timeval now;
3389 microuptime(&now);
3390 if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
3391 /* soft mount in reconnect for a while... terminate ASAP */
316670eb 3392 OSAddAtomic64(1, &nfsstats.rpctimeouts);
b0d623f7
A
3393 req->r_flags |= R_SOFTTERM;
3394 req->r_error = error = ETIMEDOUT;
3395 break;
3396 }
3397 }
2d21ac55
A
3398 /* make sure socket thread is running, then wait */
3399 nfs_mount_sock_thread_wake(nmp);
0a7de745 3400 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
2d21ac55 3401 break;
0a7de745
A
3402 }
3403 msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
36401178 3404 slpflag = 0;
2d21ac55
A
3405 }
3406 lck_mtx_unlock(&nmp->nm_lock);
b0d623f7
A
3407 if (error) {
3408 lck_mtx_lock(&req->r_mtx);
3409 req->r_error = error;
3410 req->r_flags &= ~R_SENDING;
3411 lck_mtx_unlock(&req->r_mtx);
0a7de745 3412 return error;
b0d623f7 3413 }
2d21ac55
A
3414 goto again;
3415 }
6d2010ae
A
3416 nso = nmp->nm_nso;
3417 /* note that we're using the mount's socket to do the send */
3418 nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
2d21ac55 3419 lck_mtx_unlock(&nmp->nm_lock);
6d2010ae 3420 if (!nso) {
2d21ac55
A
3421 nfs_sndunlock(req);
3422 lck_mtx_lock(&req->r_mtx);
b0d623f7 3423 req->r_flags &= ~R_SENDING;
2d21ac55
A
3424 req->r_flags |= R_MUSTRESEND;
3425 req->r_rtt = 0;
3426 lck_mtx_unlock(&req->r_mtx);
0a7de745 3427 return 0;
2d21ac55
A
3428 }
3429
3430 lck_mtx_lock(&req->r_mtx);
3431 rexmit = (req->r_flags & R_SENT);
3432
3433 if (sotype == SOCK_DGRAM) {
3434 lck_mtx_lock(&nmp->nm_lock);
3435 if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
3436 /* if we can't send this out yet, wait on the cwnd queue */
6d2010ae 3437 slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
2d21ac55
A
3438 lck_mtx_unlock(&nmp->nm_lock);
3439 nfs_sndunlock(req);
b0d623f7 3440 req->r_flags &= ~R_SENDING;
2d21ac55
A
3441 req->r_flags |= R_MUSTRESEND;
3442 lck_mtx_unlock(&req->r_mtx);
3443 if (!wait) {
3444 req->r_rtt = 0;
0a7de745 3445 return 0;
2d21ac55
A
3446 }
3447 lck_mtx_lock(&nmp->nm_lock);
3448 while (nmp->nm_sent >= nmp->nm_cwnd) {
0a7de745 3449 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
2d21ac55 3450 break;
0a7de745 3451 }
2d21ac55 3452 TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
36401178
A
3453 msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
3454 slpflag = 0;
2d21ac55
A
3455 if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
3456 TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
3457 req->r_cchain.tqe_next = NFSREQNOLIST;
3458 }
2d21ac55
A
3459 }
3460 lck_mtx_unlock(&nmp->nm_lock);
2d21ac55
A
3461 goto again;
3462 }
1c79356b 3463 /*
2d21ac55
A
3464 * We update these *before* the send to avoid racing
3465 * against others who may be looking to send requests.
1c79356b 3466 */
2d21ac55
A
3467 if (!rexmit) {
3468 /* first transmit */
3469 req->r_flags |= R_CWND;
3470 nmp->nm_sent += NFS_CWNDSCALE;
3471 } else {
3472 /*
3473 * When retransmitting, turn timing off
316670eb 3474 * and divide congestion window by 2.
2d21ac55
A
3475 */
3476 req->r_flags &= ~R_TIMING;
3477 nmp->nm_cwnd >>= 1;
0a7de745 3478 if (nmp->nm_cwnd < NFS_CWNDSCALE) {
2d21ac55 3479 nmp->nm_cwnd = NFS_CWNDSCALE;
0a7de745 3480 }
1c79356b 3481 }
2d21ac55
A
3482 lck_mtx_unlock(&nmp->nm_lock);
3483 }
3484
3485 req->r_flags &= ~R_MUSTRESEND;
3486 lck_mtx_unlock(&req->r_mtx);
3487
3488 error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
0a7de745 3489 wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
2d21ac55 3490 if (error) {
0a7de745 3491 if (wait) {
2d21ac55 3492 log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
0a7de745 3493 }
2d21ac55
A
3494 nfs_sndunlock(req);
3495 lck_mtx_lock(&req->r_mtx);
b0d623f7 3496 req->r_flags &= ~R_SENDING;
2d21ac55
A
3497 req->r_flags |= R_MUSTRESEND;
3498 req->r_rtt = 0;
3499 lck_mtx_unlock(&req->r_mtx);
0a7de745 3500 return 0;
2d21ac55
A
3501 }
3502
3503 bzero(&msg, sizeof(msg));
6d2010ae
A
3504 if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
3505 msg.msg_name = (caddr_t)sendnam;
3506 msg.msg_namelen = sendnam->sa_len;
2d21ac55 3507 }
cb323159 3508 NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
6d2010ae 3509 error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
fe8ab488 3510 if (error || (sentlen != req->r_mreqlen)) {
39236c6e 3511 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
0a7de745 3512 req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
fe8ab488 3513 }
0a7de745
A
3514
3515 if (!error && (sentlen != req->r_mreqlen)) {
2d21ac55 3516 error = EWOULDBLOCK;
0a7de745 3517 }
2d21ac55
A
3518 needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));
3519
3520 lck_mtx_lock(&req->r_mtx);
b0d623f7 3521 req->r_flags &= ~R_SENDING;
2d21ac55 3522 req->r_rtt = 0;
0a7de745 3523 if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
2d21ac55 3524 req->r_rexmit = NFS_MAXREXMIT;
0a7de745 3525 }
2d21ac55
A
3526
3527 if (!error) {
3528 /* SUCCESS */
3529 req->r_flags &= ~R_RESENDERR;
0a7de745 3530 if (rexmit) {
316670eb 3531 OSAddAtomic64(1, &nfsstats.rpcretries);
0a7de745 3532 }
2d21ac55
A
3533 req->r_flags |= R_SENT;
3534 if (req->r_flags & R_WAITSENT) {
3535 req->r_flags &= ~R_WAITSENT;
3536 wakeup(req);
3537 }
3538 nfs_sndunlock(req);
3539 lck_mtx_unlock(&req->r_mtx);
0a7de745 3540 return 0;
2d21ac55
A
3541 }
3542
3543 /* send failed */
3544 req->r_flags |= R_MUSTRESEND;
0a7de745 3545 if (rexmit) {
2d21ac55 3546 req->r_flags |= R_RESENDERR;
0a7de745
A
3547 }
3548 if ((error == EINTR) || (error == ERESTART)) {
2d21ac55 3549 req->r_error = error;
0a7de745 3550 }
2d21ac55
A
3551 lck_mtx_unlock(&req->r_mtx);
3552
3553 if (sotype == SOCK_DGRAM) {
1c79356b 3554 /*
2d21ac55
A
3555 * Note: even though a first send may fail, we consider
3556 * the request sent for congestion window purposes.
3557 * So we don't need to undo any of the changes made above.
1c79356b 3558 */
1c79356b 3559 /*
2d21ac55
A
3560 * Socket errors ignored for connectionless sockets??
3561 * For now, ignore them all
1c79356b 3562 */
2d21ac55 3563 if ((error != EINTR) && (error != ERESTART) &&
6d2010ae 3564 (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
2d21ac55 3565 int clearerror = 0, optlen = sizeof(clearerror);
6d2010ae 3566 sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
2d21ac55 3567#ifdef NFS_SOCKET_DEBUGGING
0a7de745 3568 if (clearerror) {
39236c6e 3569 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
0a7de745
A
3570 error, clearerror);
3571 }
2d21ac55 3572#endif
1c79356b 3573 }
2d21ac55
A
3574 }
3575
3576 /* check if it appears we should reconnect the socket */
3577 switch (error) {
3578 case EWOULDBLOCK:
3579 /* if send timed out, reconnect if on TCP */
0a7de745 3580 if (sotype != SOCK_STREAM) {
2d21ac55 3581 break;
0a7de745 3582 }
2d21ac55
A
3583 case EPIPE:
3584 case EADDRNOTAVAIL:
3585 case ENETDOWN:
3586 case ENETUNREACH:
3587 case ENETRESET:
3588 case ECONNABORTED:
3589 case ECONNRESET:
3590 case ENOTCONN:
3591 case ESHUTDOWN:
3592 case ECONNREFUSED:
3593 case EHOSTDOWN:
3594 case EHOSTUNREACH:
0a7de745 3595 /* case ECANCELED??? */
2d21ac55
A
3596 needrecon = 1;
3597 break;
3598 }
6d2010ae 3599 if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
39236c6e 3600 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
2d21ac55
A
3601 nfs_need_reconnect(nmp);
3602 }
3603
3604 nfs_sndunlock(req);
3605
0a7de745 3606 if (nfs_is_dead(error, nmp)) {
3e170ce0 3607 error = EIO;
0a7de745 3608 }
3e170ce0 3609
2d21ac55
A
3610 /*
3611 * Don't log some errors:
3612 * EPIPE errors may be common with servers that drop idle connections.
3613 * EADDRNOTAVAIL may occur on network transitions.
3614 * ENOTCONN may occur under some network conditions.
3615 */
0a7de745 3616 if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
2d21ac55 3617 error = 0;
0a7de745
A
3618 }
3619 if (error && (error != EINTR) && (error != ERESTART)) {
2d21ac55 3620 log(LOG_INFO, "nfs send error %d for server %s\n", error,
0a7de745
A
3621 !req->r_nmp ? "<unmounted>" :
3622 vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
3623 }
2d21ac55
A
3624
3625 /* prefer request termination error over other errors */
3626 error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
0a7de745 3627 if (error2) {
2d21ac55 3628 error = error2;
0a7de745 3629 }
2d21ac55
A
3630
3631 /* only allow the following errors to be returned */
3632 if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
0a7de745 3633 (error != ENXIO) && (error != ETIMEDOUT)) {
a39ff7e2
A
3634 /*
3635 * We got some error we don't know what do do with,
3636 * i.e., we're not reconnecting, we map it to
3637 * EIO. Presumably our send failed and we better tell
3638 * the caller so they don't wait for a reply that is
3639 * never going to come. If we are reconnecting we
3640 * return 0 and the request will be resent.
3641 */
3642 error = needrecon ? 0 : EIO;
0a7de745
A
3643 }
3644 return error;
2d21ac55
A
3645}
3646
3647/*
3648 * NFS client socket upcalls
3649 *
3650 * Pull RPC replies out of an NFS mount's socket and match them
3651 * up with the pending request.
3652 *
3653 * The datagram code is simple because we always get whole
3654 * messages out of the socket.
3655 *
3656 * The stream code is more involved because we have to parse
3657 * the RPC records out of the stream.
3658 */
3659
3660/* NFS client UDP socket upcall */
b0d623f7 3661void
2d21ac55
A
3662nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3663{
3664 struct nfsmount *nmp = arg;
6d2010ae 3665 struct nfs_socket *nso = nmp->nm_nso;
2d21ac55
A
3666 size_t rcvlen;
3667 mbuf_t m;
3668 int error = 0;
3669
0a7de745 3670 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
2d21ac55 3671 return;
0a7de745 3672 }
2d21ac55
A
3673
3674 do {
6d2010ae 3675 /* make sure we're on the current socket */
0a7de745 3676 if (!nso || (nso->nso_so != so)) {
6d2010ae 3677 return;
0a7de745 3678 }
6d2010ae 3679
2d21ac55
A
3680 m = NULL;
3681 rcvlen = 1000000;
3682 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
0a7de745 3683 if (m) {
2d21ac55 3684 nfs_request_match_reply(nmp, m);
0a7de745 3685 }
2d21ac55
A
3686 } while (m && !error);
3687
3688 if (error && (error != EWOULDBLOCK)) {
3689 /* problems with the socket... mark for reconnection */
39236c6e 3690 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
2d21ac55
A
3691 nfs_need_reconnect(nmp);
3692 }
3693}
3694
3695/* NFS client TCP socket upcall */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;

	/* ignore upcalls that race with connection setup */
	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/*
	 * Make sure this upcall should be trying to do work.
	 * NSO_UPCALL acts as a single-owner gate: only one upcall at a
	 * time may run the record parser, and it works on a private copy
	 * of the parsing state (nrrs) so the socket lock can be dropped.
	 */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	nso->nso_flags |= NSO_UPCALL;
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the sockets's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is waiting for this upcall to finish */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3762
3763/*
3764 * "poke" a socket to try to provoke any pending errors
3765 */
b0d623f7 3766void
2d21ac55
A
3767nfs_sock_poke(struct nfsmount *nmp)
3768{
b0d623f7 3769 struct iovec aio;
2d21ac55
A
3770 struct msghdr msg;
3771 size_t len;
3772 int error = 0;
3773 int dummy;
3774
3775 lck_mtx_lock(&nmp->nm_lock);
6d2010ae
A
3776 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
3777 !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
fe8ab488
A
3778 /* Nothing to poke */
3779 nmp->nm_sockflags &= ~NMSOCK_POKE;
3780 wakeup(&nmp->nm_sockflags);
2d21ac55
A
3781 lck_mtx_unlock(&nmp->nm_lock);
3782 return;
3783 }
3784 lck_mtx_unlock(&nmp->nm_lock);
b0d623f7 3785 aio.iov_base = &dummy;
2d21ac55
A
3786 aio.iov_len = 0;
3787 len = 0;
3788 bzero(&msg, sizeof(msg));
b0d623f7 3789 msg.msg_iov = &aio;
2d21ac55 3790 msg.msg_iovlen = 1;
6d2010ae 3791 error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
39236c6e 3792 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
fe8ab488
A
3793 lck_mtx_lock(&nmp->nm_lock);
3794 nmp->nm_sockflags &= ~NMSOCK_POKE;
3795 wakeup(&nmp->nm_sockflags);
3796 lck_mtx_unlock(&nmp->nm_lock);
316670eb 3797 nfs_is_dead(error, nmp);
2d21ac55
A
3798}
3799
3800/*
3801 * Match an RPC reply with the corresponding request
3802 */
void
nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
{
	struct nfsreq *req;
	struct nfsm_chain nmrep;
	u_int32_t reply = 0, rxid = 0;
	int error = 0, asyncioq, t1;

	/* Get the xid and check that it is an rpc reply */
	nfsm_chain_dissect_init(error, &nmrep, mrep);
	nfsm_chain_get_32(error, &nmrep, rxid);
	nfsm_chain_get_32(error, &nmrep, reply);
	if (error || (reply != RPC_REPLY)) {
		OSAddAtomic64(1, &nfsstats.rpcinvalid);
		mbuf_freem(mrep);
		return;
	}

	/*
	 * Loop through the request list to match up the reply
	 * Iff no match, just drop it.
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		/* cheap check first: already answered, or xid mismatch */
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			continue;
		}
		/* looks like we have it, grab lock and double check */
		lck_mtx_lock(&req->r_mtx);
		if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
			lck_mtx_unlock(&req->r_mtx);
			continue;
		}
		/* Found it.. */
		req->r_nmrep = nmrep;
		lck_mtx_lock(&nmp->nm_lock);
		if (nmp->nm_sotype == SOCK_DGRAM) {
			/*
			 * Update congestion window.
			 * Do the additive increase of one rpc/rtt.
			 */
			FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				    ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND) {
					nmp->nm_cwnd = NFS_MAXCWND;
				}
			}
			if (req->r_flags & R_CWND) {
				/* request no longer counts against the window */
				nmp->nm_sent -= NFS_CWNDSCALE;
				req->r_flags &= ~R_CWND;
			}
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		/*
		 * Update rtt using a gain of 0.125 on the mean
		 * and a gain of 0.25 on the deviation.
		 */
		if (req->r_flags & R_TIMING) {
			/*
			 * Since the timer resolution of
			 * NFS_HZ is so course, it can often
			 * result in r_rtt == 0. Since
			 * r_rtt == N means that the actual
			 * rtt is between N+dt and N+2-dt ticks,
			 * add 1.
			 */
			if (proct[req->r_procnum] == 0) {
				panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
			}
			t1 = req->r_rtt + 1;
			t1 -= (NFS_SRTT(req) >> 3);
			NFS_SRTT(req) += t1;
			if (t1 < 0) {
				t1 = -t1;
			}
			t1 -= (NFS_SDRTT(req) >> 2);
			NFS_SDRTT(req) += t1;
		}
		nmp->nm_timeouts = 0;
		lck_mtx_unlock(&nmp->nm_lock);
		/* signal anyone waiting on this request */
		wakeup(req);
		asyncioq = (req->r_callback.rcb_func != NULL);
#if CONFIG_NFS_GSS
		if (nfs_request_using_gss(req)) {
			nfs_gss_clnt_rpcdone(req);
		}
#endif /* CONFIG_NFS_GSS */
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_unlock(nfs_request_mutex);
		/* if it's an async RPC with a callback, queue it up */
		if (asyncioq) {
			nfs_asyncio_finish(req);
		}
		break;
	}

	if (!req) {
		/* not matched to a request, so drop it. */
		lck_mtx_unlock(nfs_request_mutex);
		OSAddAtomic64(1, &nfsstats.rpcunexpected);
		mbuf_freem(mrep);
	}
}
3916
3917/*
3918 * Wait for the reply for a given request...
3919 * ...potentially resending the request if necessary.
3920 */
int
nfs_wait_reply(struct nfsreq *req)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int error = 0, slpflag, first = 1;

	/* allow the sleep to be interrupted on interruptible mounts */
	if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
		slpflag = PCATCH;
	} else {
		slpflag = 0;
	}

	lck_mtx_lock(&req->r_mtx);
	while (!req->r_nmrep.nmc_mhead) {
		if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
			break;
		}
		if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
			break;
		}
		/* check if we need to resend */
		if (req->r_flags & R_MUSTRESEND) {
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			req->r_flags |= R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			if (nfs_request_using_gss(req)) {
				/*
				 * It's an RPCSEC_GSS request.
				 * Can't just resend the original request
				 * without bumping the cred sequence number.
				 * Go back and re-build the request.
				 */
				lck_mtx_lock(&req->r_mtx);
				req->r_flags &= ~R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				return EAGAIN;
			}
			error = nfs_send(req, 1);
			lck_mtx_lock(&req->r_mtx);
			NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
			if (error) {
				break;
			}
			/* the reply may have arrived while we were resending */
			if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
				break;
			}
		}
		/* need to poll if we're P_NOREMOTEHANG */
		if (nfs_noremotehang(req->r_thread)) {
			ts.tv_sec = 1;
		}
		msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
		/* after the first pass, check signals against the thread too */
		first = slpflag = 0;
	}
	lck_mtx_unlock(&req->r_mtx);

	return error;
}
3981
3982/*
2d21ac55
A
3983 * An NFS request goes something like this:
3984 * (nb: always frees up mreq mbuf list)
3985 * nfs_request_create()
3986 * - allocates a request struct if one is not provided
3987 * - initial fill-in of the request struct
3988 * nfs_request_add_header()
3989 * - add the RPC header
3990 * nfs_request_send()
3991 * - link it into list
3992 * - call nfs_send() for first transmit
3993 * nfs_request_wait()
3994 * - call nfs_wait_reply() to wait for the reply
3995 * nfs_request_finish()
3996 * - break down rpc header and return with error or nfs reply
3997 * pointed to by nmrep.
3998 * nfs_request_rele()
3999 * nfs_request_destroy()
4000 * - clean up the request struct
4001 * - free the request struct if it was allocated by nfs_request_create()
4002 */
4003
4004/*
4005 * Set up an NFS request struct (allocating if no request passed in).
1c79356b
A
4006 */
int
nfs_request_create(
	nfsnode_t np,
	mount_t mp, /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq **reqp)
{
	struct nfsreq *req, *newreq = NULL;
	struct nfsmount *nmp;

	req = *reqp;
	if (!req) {
		/* allocate a new NFS request structure */
		MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
		if (!newreq) {
			/* on failure the request mbufs are always consumed */
			mbuf_freem(nmrest->nmc_mhead);
			nmrest->nmc_mhead = NULL;
			return ENOMEM;
		}
		req = newreq;
	}

	bzero(req, sizeof(*req));
	if (req == newreq) {
		/* remember we own this one so destroy can free it */
		req->r_flags = R_ALLOCATED;
	}

	nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
	if (nfs_mount_gone(nmp)) {
		if (newreq) {
			FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
		}
		return ENXIO;
	}
	lck_mtx_lock(&nmp->nm_lock);
	/* refuse new requests on a force-unmounting/dead mount that has timed out */
	if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
	    (nmp->nm_state & NFSSTA_TIMEO)) {
		lck_mtx_unlock(&nmp->nm_lock);
		mbuf_freem(nmrest->nmc_mhead);
		nmrest->nmc_mhead = NULL;
		if (newreq) {
			FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
		}
		return ENXIO;
	}

	if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
		OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
	}
	if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) {
		panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
	}

	lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
	req->r_nmp = nmp;
	nmp->nm_ref++; /* request holds a mount reference; dropped in destroy */
	req->r_np = np;
	req->r_thread = thd;
	if (!thd) {
		req->r_flags |= R_NOINTR;
	}
	if (IS_VALID_CRED(cred)) {
		kauth_cred_ref(cred);
		req->r_cred = cred;
	}
	req->r_procnum = procnum;
	if (proct[procnum] > 0) {
		/* this procedure participates in RTT timing */
		req->r_flags |= R_TIMING;
	}
	req->r_nmrep.nmc_mhead = NULL;
	SLIST_INIT(&req->r_gss_seqlist);
	req->r_achain.tqe_next = NFSREQNOLIST;
	req->r_rchain.tqe_next = NFSREQNOLIST;
	req->r_cchain.tqe_next = NFSREQNOLIST;

	/* set auth flavor to use for request */
	if (!req->r_cred) {
		req->r_auth = RPCAUTH_NONE;
	} else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
		req->r_auth = req->r_np->n_auth;
	} else {
		req->r_auth = nmp->nm_auth;
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* move the request mbuf chain to the nfsreq */
	req->r_mrest = nmrest->nmc_mhead;
	nmrest->nmc_mhead = NULL;

	req->r_flags |= R_INITTED;
	req->r_refs = 1;
	if (newreq) {
		*reqp = req;
	}
	return 0;
}
4107
4108/*
4109 * Clean up and free an NFS request structure.
4110 */
void
nfs_request_destroy(struct nfsreq *req)
{
	struct nfsmount *nmp;
	int clearjbtimeo = 0;

#if CONFIG_NFS_GSS
	struct gss_seq *gsp, *ngsp;
#endif

	if (!req || !(req->r_flags & R_INITTED)) {
		return;
	}
	nmp = req->r_nmp;
	req->r_flags &= ~R_INITTED;
	if (req->r_lflags & RL_QUEUED) {
		nfs_reqdequeue(req);
	}

	if (req->r_achain.tqe_next != NFSREQNOLIST) {
		/*
		 * Still on an async I/O queue?
		 * %%% But which one, we may be on a local iod.
		 */
		lck_mtx_lock(nfsiod_mutex);
		/* re-check under the lock before unlinking */
		if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
			req->r_achain.tqe_next = NFSREQNOLIST;
		}
		lck_mtx_unlock(nfsiod_mutex);
	}

	lck_mtx_lock(&req->r_mtx);
	if (nmp) {
		lck_mtx_lock(&nmp->nm_lock);
		if (req->r_flags & R_CWND) {
			/* Decrement the outstanding request count. */
			req->r_flags &= ~R_CWND;
			nmp->nm_sent -= NFS_CWNDSCALE;
			if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
				/* congestion window is open, poke the cwnd queue */
				struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
				TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
				req2->r_cchain.tqe_next = NFSREQNOLIST;
				wakeup(req2);
			}
		}
		assert((req->r_flags & R_RESENDQ) == 0);
		/* XXX should we just remove this conditional, we should have a reference if we're resending */
		if (req->r_rchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			if (req->r_flags & R_RESENDQ) {
				req->r_flags &= ~R_RESENDQ;
			}
		}
		if (req->r_cchain.tqe_next != NFSREQNOLIST) {
			TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
			req->r_cchain.tqe_next = NFSREQNOLIST;
		}
		if (req->r_flags & R_JBTPRINTFMSG) {
			/* last "jukebox" message holder clears the timeout state */
			req->r_flags &= ~R_JBTPRINTFMSG;
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (clearjbtimeo) {
		nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
	}
	/* r_mhead (full request) links r_mrest (body); free whichever head exists */
	if (req->r_mhead) {
		mbuf_freem(req->r_mhead);
	} else if (req->r_mrest) {
		mbuf_freem(req->r_mrest);
	}
	if (req->r_nmrep.nmc_mhead) {
		mbuf_freem(req->r_nmrep.nmc_mhead);
	}
	if (IS_VALID_CRED(req->r_cred)) {
		kauth_cred_unref(&req->r_cred);
	}
#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		nfs_gss_clnt_rpcdone(req);
	}
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
	FREE(gsp, M_TEMP);
	if (req->r_gss_ctx) {
		nfs_gss_clnt_ctx_unref(req);
	}
#endif /* CONFIG_NFS_GSS */
	if (req->r_wrongsec) {
		FREE(req->r_wrongsec, M_TEMP);
	}
	if (nmp) {
		/* drop the mount reference taken in nfs_request_create() */
		nfs_mount_rele(nmp);
	}
	lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
	if (req->r_flags & R_ALLOCATED) {
		FREE_ZONE(req, sizeof(*req), M_NFSREQ);
	}
}
4215
4216void
4217nfs_request_ref(struct nfsreq *req, int locked)
4218{
0a7de745 4219 if (!locked) {
2d21ac55 4220 lck_mtx_lock(&req->r_mtx);
0a7de745
A
4221 }
4222 if (req->r_refs <= 0) {
2d21ac55 4223 panic("nfsreq reference error");
0a7de745 4224 }
2d21ac55 4225 req->r_refs++;
0a7de745 4226 if (!locked) {
2d21ac55 4227 lck_mtx_unlock(&req->r_mtx);
0a7de745 4228 }
2d21ac55
A
4229}
4230
4231void
4232nfs_request_rele(struct nfsreq *req)
4233{
4234 int destroy;
4235
4236 lck_mtx_lock(&req->r_mtx);
0a7de745 4237 if (req->r_refs <= 0) {
2d21ac55 4238 panic("nfsreq reference underflow");
0a7de745 4239 }
2d21ac55
A
4240 req->r_refs--;
4241 destroy = (req->r_refs == 0);
4242 lck_mtx_unlock(&req->r_mtx);
0a7de745 4243 if (destroy) {
2d21ac55 4244 nfs_request_destroy(req);
0a7de745 4245 }
2d21ac55
A
4246}
4247
4248
4249/*
4250 * Add an (updated) RPC header with authorization to an NFS request.
4251 */
4252int
4253nfs_request_add_header(struct nfsreq *req)
4254{
4255 struct nfsmount *nmp;
6d2010ae 4256 int error = 0;
2d21ac55
A
4257 mbuf_t m;
4258
4259 /* free up any previous header */
4260 if ((m = req->r_mhead)) {
0a7de745 4261 while (m && (m != req->r_mrest)) {
2d21ac55 4262 m = mbuf_free(m);
0a7de745 4263 }
2d21ac55
A
4264 req->r_mhead = NULL;
4265 }
4266
5ba3f43e 4267 nmp = req->r_nmp;
0a7de745
A
4268 if (nfs_mount_gone(nmp)) {
4269 return ENXIO;
4270 }
2d21ac55 4271
6d2010ae 4272 error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
0a7de745
A
4273 if (error) {
4274 return error;
4275 }
2d21ac55
A
4276
4277 req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
5ba3f43e 4278 nmp = req->r_nmp;
0a7de745
A
4279 if (nfs_mount_gone(nmp)) {
4280 return ENXIO;
4281 }
2d21ac55 4282 lck_mtx_lock(&nmp->nm_lock);
0a7de745 4283 if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
2d21ac55 4284 req->r_retry = nmp->nm_retry;
0a7de745
A
4285 } else {
4286 req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
4287 }
2d21ac55
A
4288 lck_mtx_unlock(&nmp->nm_lock);
4289
0a7de745 4290 return error;
2d21ac55
A
4291}
4292
4293
4294/*
4295 * Queue an NFS request up and send it out.
4296 */
int
nfs_request_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct timeval now;

	/* flag the request as in-flight before it becomes visible on the queue */
	lck_mtx_lock(&req->r_mtx);
	req->r_flags |= R_SENDING;
	lck_mtx_unlock(&req->r_mtx);

	lck_mtx_lock(nfs_request_mutex);

	nmp = req->r_nmp;
	if (nfs_mount_gone(nmp)) {
		lck_mtx_unlock(nfs_request_mutex);
		return ENXIO;
	}

	microuptime(&now);
	if (!req->r_start) {
		/* first send: record start time and seed the "server not responding" message timer */
		req->r_start = now.tv_sec;
		req->r_lastmsg = now.tv_sec -
		    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	}

	OSAddAtomic64(1, &nfsstats.rpcrequests);

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 * Make sure that the request queue timer is running
	 * to check for possible request timeout.
	 */
	TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
	req->r_lflags |= RL_QUEUED;
	if (!nfs_request_timer_on) {
		nfs_request_timer_on = 1;
		nfs_interval_timer_start(nfs_request_timer_call,
		    NFS_REQUESTDELAY);
	}
	lck_mtx_unlock(nfs_request_mutex);

	/* Send the request... */
	return nfs_send(req, wait);
}
4342
4343/*
4344 * Call nfs_wait_reply() to wait for the reply.
4345 */
4346void
4347nfs_request_wait(struct nfsreq *req)
4348{
4349 req->r_error = nfs_wait_reply(req);
4350}
55e303ae 4351
2d21ac55
A
/*
 * Finish up an NFS request by dequeueing it and
 * doing the initial NFS request reply processing.
 *
 * Responsibilities (in order):
 *  - dequeue the request and release its congestion-window slot;
 *  - for RPCSEC_GSS requests, retire the sequence number and handle
 *    EAGAIN resends / credential problems by flagging R_RESTART;
 *  - mark the mount "up" again after a successful reply;
 *  - parse the RPC reply header (reply status, auth verifier,
 *    accepted status) and map RPC-level failures to errnos;
 *  - handle NFSERR_TRYLATER (jukebox) by delaying and restarting, and
 *    NFSERR_WRONGSEC (NFSv4) by renegotiating the security flavor.
 *
 * On success, *nmrepp is set up to continue parsing the NFS reply body
 * and *status holds the NFS status code.  Returns 0 or an errno.
 * A return of 0 with R_RESTART set tells the caller to resend.
 */
int
nfs_request_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsmount *nmp;
	mbuf_t mrep;
	int verf_type = 0;
	uint32_t verf_len = 0;
	uint32_t reply_status = 0;
	uint32_t rejected_status = 0;
	uint32_t auth_status = 0;
	uint32_t accepted_status = 0;
	struct nfsm_chain nmrep;
	int error, clearjbtimeo;

	error = req->r_error;

	if (nmrepp) {
		nmrepp->nmc_mhead = NULL;
	}

	/* RPC done, unlink the request. */
	nfs_reqdequeue(req);

	mrep = req->r_nmrep.nmc_mhead;

	nmp = req->r_nmp;

	if ((req->r_flags & R_CWND) && nmp) {
		/*
		 * Decrement the outstanding request count.
		 */
		req->r_flags &= ~R_CWND;
		lck_mtx_lock(&nmp->nm_lock);
		FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
		nmp->nm_sent -= NFS_CWNDSCALE;
		if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
			/* congestion window is open, poke the cwnd queue */
			struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
			TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
			req2->r_cchain.tqe_next = NFSREQNOLIST;
			wakeup(req2);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

#if CONFIG_NFS_GSS
	if (nfs_request_using_gss(req)) {
		/*
		 * If the request used an RPCSEC_GSS credential
		 * then reset its sequence number bit in the
		 * request window.
		 */
		nfs_gss_clnt_rpcdone(req);

		/*
		 * If we need to re-send, go back and re-build the
		 * request based on a new sequence number.
		 * Note that we're using the original XID.
		 */
		if (error == EAGAIN) {
			req->r_error = 0;
			if (mrep) {
				mbuf_freem(mrep);
			}
			error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
			req->r_nmrep.nmc_mhead = NULL;
			req->r_flags |= R_RESTART;
			if (error == ENEEDAUTH) {
				req->r_xid = 0; // get a new XID
				error = 0;
			}
			goto nfsmout;
		}
	}
#endif /* CONFIG_NFS_GSS */

	/*
	 * If there was a successful reply, make sure to mark the mount as up.
	 * If a tprintf message was given (or if this is a timed-out soft mount)
	 * then post a tprintf message indicating the server is alive again.
	 */
	if (!error) {
		if ((req->r_flags & R_TPRINTFMSG) ||
		    (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
		    ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
		} else {
			nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
		}
	}
	if (!error && !nmp) {
		error = ENXIO;
	}
	nfsmout_if(error);

	/*
	 * break down the RPC header and check if ok
	 */
	nmrep = req->r_nmrep;
	nfsm_chain_get_32(error, &nmrep, reply_status);
	nfsmout_if(error);
	if (reply_status == RPC_MSGDENIED) {
		nfsm_chain_get_32(error, &nmrep, rejected_status);
		nfsmout_if(error);
		if (rejected_status == RPC_MISMATCH) {
			/* server doesn't speak our RPC version */
			error = ENOTSUP;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, &nmrep, auth_status);
		nfsmout_if(error);
		switch (auth_status) {
#if CONFIG_NFS_GSS
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/*
			 * An RPCSEC_GSS cred or context problem.
			 * We can't use it anymore.
			 * Restore the args, renew the context
			 * and set up for a resend.
			 */
			error = nfs_gss_clnt_args_restore(req);
			if (error && error != ENEEDAUTH) {
				break;
			}

			if (!error) {
				error = nfs_gss_clnt_ctx_renew(req);
				if (error) {
					break;
				}
			}
			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			goto nfsmout;
#endif /* CONFIG_NFS_GSS */
		default:
			/* all other auth rejections are fatal to this request */
			error = EACCES;
			break;
		}
		goto nfsmout;
	}

	/* Now check the verifier */
	nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
	nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
	nfsmout_if(error);

	switch (req->r_auth) {
	case RPCAUTH_NONE:
	case RPCAUTH_SYS:
		/* Any AUTH_SYS verifier is ignored */
		if (verf_len > 0) {
			nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, &nmrep, accepted_status);
		break;
#if CONFIG_NFS_GSS
	case RPCAUTH_KRB5:
	case RPCAUTH_KRB5I:
	case RPCAUTH_KRB5P:
		/* GSS verifies the checksum and extracts accepted_status itself */
		error = nfs_gss_clnt_verf_get(req, &nmrep,
		    verf_type, verf_len, &accepted_status);
		break;
#endif /* CONFIG_NFS_GSS */
	}
	nfsmout_if(error);

	switch (accepted_status) {
	case RPC_SUCCESS:
		if (req->r_procnum == NFSPROC_NULL) {
			/*
			 * The NFS null procedure is unique,
			 * in not returning an NFS status.
			 */
			*status = NFS_OK;
		} else {
			nfsm_chain_get_32(error, &nmrep, *status);
			nfsmout_if(error);
		}

		if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
			/*
			 * It's a JUKEBOX error - delay and try again
			 */
			int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

			mbuf_freem(mrep);
			req->r_nmrep.nmc_mhead = NULL;
			if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			/* exponential backoff of the retry delay, capped at 30s */
			req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
			if (req->r_delay > 30) {
				req->r_delay = 30;
			}
			if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
				if (!(req->r_flags & R_JBTPRINTFMSG)) {
					req->r_flags |= R_JBTPRINTFMSG;
					lck_mtx_lock(&nmp->nm_lock);
					nmp->nm_jbreqs++;
					lck_mtx_unlock(&nmp->nm_lock);
				}
				nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
				    "resource temporarily unavailable (jukebox)", 0);
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
			    !(req->r_flags & R_NOINTR)) {
				/* for soft mounts, just give up after a short while */
				OSAddAtomic64(1, &nfsstats.rpctimeouts);
				nfs_softterm(req);
				error = req->r_error;
				goto nfsmout;
			}
			delay = req->r_delay;
			if (req->r_callback.rcb_func) {
				/* async request: schedule the resend instead of sleeping */
				struct timeval now;
				microuptime(&now);
				req->r_resendtime = now.tv_sec + delay;
			} else {
				do {
					if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
						goto nfsmout;
					}
					tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
					slpflag = 0;
				} while (--delay > 0);
			}
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
			return 0;
		}

		if (req->r_flags & R_JBTPRINTFMSG) {
			/* jukebox condition cleared; drop our count and notify */
			req->r_flags &= ~R_JBTPRINTFMSG;
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_jbreqs--;
			clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
		}

#if CONFIG_NFS4
		if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
			/*
			 * Hmmm... we need to try a different security flavor.
			 * The first time a request hits this, we will allocate an array
			 * to track flavors to try.  We fill the array with the mount's
			 * preferred flavors or the server's preferred flavors or just the
			 * flavors we support.
			 */
			uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
			int srvcount, i, j;

			/* Call SECINFO to try to get list of flavors from server. */
			srvcount = NX_MAX_SEC_FLAVORS;
			nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);

			if (!req->r_wrongsec) {
				/* first time... set up flavor array */
				MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK);
				if (!req->r_wrongsec) {
					error = EACCES;
					goto nfsmout;
				}
				i = 0;
				if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
					for (; i < nmp->nm_sec.count; i++) {
						req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
					}
				} else if (srvcount) { /* otherwise use the server's list of flavors */
					for (; i < srvcount; i++) {
						req->r_wrongsec[i] = srvflavors[i];
					}
				} else { /* otherwise, just try the flavors we support. */
					req->r_wrongsec[i++] = RPCAUTH_KRB5P;
					req->r_wrongsec[i++] = RPCAUTH_KRB5I;
					req->r_wrongsec[i++] = RPCAUTH_KRB5;
					req->r_wrongsec[i++] = RPCAUTH_SYS;
					req->r_wrongsec[i++] = RPCAUTH_NONE;
				}
				for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* clear the current flavor from the list */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] == req->r_auth) {
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}

			/* find the next flavor to try */
			for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
				if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
					if (!srvcount) { /* no server list, just try it */
						break;
					}
					/* check that it's in the server's list */
					for (j = 0; j < srvcount; j++) {
						if (req->r_wrongsec[i] == srvflavors[j]) {
							break;
						}
					}
					if (j < srvcount) { /* found */
						break;
					}
					/* not found in server list */
					req->r_wrongsec[i] = RPCAUTH_INVALID;
				}
			}
			if (i == NX_MAX_SEC_FLAVORS) {
				/* nothing left to try! */
				error = EACCES;
				goto nfsmout;
			}

			/* retry with the next auth flavor */
			req->r_auth = req->r_wrongsec[i];
			req->r_xid = 0; // get a new XID
			req->r_flags |= R_RESTART;
			req->r_start = 0;
			FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
			return 0;
		}
		if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
			/*
			 * We renegotiated security for this request; so update the
			 * default security flavor for the associated node.
			 */
			if (req->r_np) {
				req->r_np->n_auth = req->r_auth;
			}
		}
#endif /* CONFIG_NFS4 */
		if (*status == NFS_OK) {
			/*
			 * Successful NFS request
			 */
			*nmrepp = nmrep;
			req->r_nmrep.nmc_mhead = NULL;
			break;
		}
		/* Got an NFS error of some kind */

		/*
		 * If the File Handle was stale, invalidate the
		 * lookup cache, just in case.
		 */
		if ((*status == ESTALE) && req->r_np) {
			cache_purge(NFSTOV(req->r_np));
			/* if monitored, also send delete event */
			if (vnode_ismonitored(NFSTOV(req->r_np))) {
				nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
			}
		}
		if (nmp->nm_vers == NFS_VER2) {
			/* NFSv2 errors carry no further reply data to parse */
			mbuf_freem(mrep);
		} else {
			*nmrepp = nmrep;
		}
		req->r_nmrep.nmc_mhead = NULL;
		error = 0;
		break;
	case RPC_PROGUNAVAIL:
		error = EPROGUNAVAIL;
		break;
	case RPC_PROGMISMATCH:
		error = ERPCMISMATCH;
		break;
	case RPC_PROCUNAVAIL:
		error = EPROCUNAVAIL;
		break;
	case RPC_GARBAGE:
		error = EBADRPC;
		break;
	case RPC_SYSTEM_ERR:
	default:
		error = EIO;
		break;
	}
nfsmout:
	if (req->r_flags & R_JBTPRINTFMSG) {
		/* error path: still need to drop the jukebox request count */
		req->r_flags &= ~R_JBTPRINTFMSG;
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_jbreqs--;
		clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
		lck_mtx_unlock(&nmp->nm_lock);
		if (clearjbtimeo) {
			nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
		}
	}
	FSDBG(273, R_XID32(req->r_xid), nmp, req,
	    (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
	return error;
}
4764
6d2010ae
A
4765/*
4766 * NFS request using a GSS/Kerberos security flavor?
4767 */
4768int
4769nfs_request_using_gss(struct nfsreq *req)
4770{
0a7de745
A
4771 if (!req->r_gss_ctx) {
4772 return 0;
4773 }
6d2010ae 4774 switch (req->r_auth) {
0a7de745
A
4775 case RPCAUTH_KRB5:
4776 case RPCAUTH_KRB5I:
4777 case RPCAUTH_KRB5P:
4778 return 1;
6d2010ae 4779 }
0a7de745 4780 return 0;
6d2010ae 4781}
2d21ac55 4782
1c79356b 4783/*
2d21ac55 4784 * Perform an NFS request synchronously.
1c79356b 4785 */
2d21ac55 4786
1c79356b 4787int
2d21ac55
A
4788nfs_request(
4789 nfsnode_t np,
0a7de745 4790 mount_t mp, /* used only if !np */
2d21ac55
A
4791 struct nfsm_chain *nmrest,
4792 int procnum,
4793 vfs_context_t ctx,
6d2010ae 4794 struct nfsreq_secinfo_args *si,
2d21ac55
A
4795 struct nfsm_chain *nmrepp,
4796 u_int64_t *xidp,
4797 int *status)
1c79356b 4798{
2d21ac55 4799 return nfs_request2(np, mp, nmrest, procnum,
0a7de745
A
4800 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4801 si, 0, nmrepp, xidp, status);
2d21ac55 4802}
1c79356b 4803
2d21ac55
A
/*
 * Perform an NFS request synchronously (full-control variant).
 *
 * Creates a request on the caller's stack, then loops:
 * add the RPC header, send, wait for the reply, and process it.
 * The loop repeats while nfs_request_finish() flags R_RESTART
 * (e.g. jukebox retry, GSS resend, NFSv4 security renegotiation).
 *
 * 'flags' may carry R_OPTMASK option bits plus R_SOFT.
 * On return, *xidp (if non-NULL) holds the XID of the last attempt and
 * *status the NFS status from the reply.  Returns 0 or an errno.
 */
int
nfs_request2(
	nfsnode_t np,
	mount_t mp, /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	struct nfsreq rq, *req = &rq;
	int error;

	if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
		return error;
	}
	req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
	if (si) {
		req->r_secinfo = *si;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
	do {
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		if (xidp) {
			*xidp = req->r_xid;
		}
		if ((error = nfs_request_send(req, 1))) {
			break;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
	nfs_request_rele(req);
	return error;
}
4852
b0d623f7 4853
#if CONFIG_NFS_GSS
/*
 * Set up a new null proc request to exchange GSS context tokens with the
 * server. Associate the context that we are setting up with the request that we
 * are sending.
 *
 * Returns NFSERR_EAUTH if no context was supplied; otherwise runs the
 * same send/wait/finish/R_RESTART loop as nfs_request2().  For a
 * context-destroy advisory the reply is deliberately not waited for.
 */

int
nfs_request_gss(
	mount_t mp,
	struct nfsm_chain *nmrest,
	thread_t thd,
	kauth_cred_t cred,
	int flags,
	struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
	struct nfsm_chain *nmrepp,
	int *status)
{
	struct nfsreq rq, *req = &rq;
	int error, wait = 1;

	if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);

	if (cp == NULL) {
		printf("nfs_request_gss request has no context\n");
		nfs_request_rele(req);
		return NFSERR_EAUTH;
	}
	/* tie the context to the request (released below before rele) */
	nfs_gss_clnt_ctx_ref(req, cp);

	/*
	 * Don't wait for a reply to a context destroy advisory
	 * to avoid hanging on a dead server.
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
		wait = 0;
	}

	FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
	do {
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}

		if ((error = nfs_request_send(req, wait))) {
			break;
		}
		if (!wait) {
			/* fire-and-forget: don't wait for or parse a reply */
			break;
		}

		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	} while (req->r_flags & R_RESTART);

	FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);

	nfs_gss_clnt_ctx_unref(req);
	nfs_request_rele(req);

	return error;
}
#endif /* CONFIG_NFS_GSS */
316670eb 4924
2d21ac55
A
/*
 * Create and start an asynchronous NFS request.
 *
 * Builds the request, marks it R_ASYNC, and sends it.  If a completion
 * callback is supplied, an extra reference is taken for the callback
 * and this function waits until the request has actually been sent
 * (R_SENT) -- pulling it off the mount's resend queue itself if the
 * mount is in recovery, since recovery would otherwise never let it go.
 *
 * On success *reqp holds the in-flight request; the caller completes it
 * with nfs_request_async_finish() (or it completes via the callback).
 */
int
nfs_request_async(
	nfsnode_t np,
	mount_t mp, /* used only if !np */
	struct nfsm_chain *nmrest,
	int procnum,
	thread_t thd,
	kauth_cred_t cred,
	struct nfsreq_secinfo_args *si,
	int flags,
	struct nfsreq_cbinfo *cb,
	struct nfsreq **reqp)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	int error, sent;

	error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
	req = *reqp;
	FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
	if (error) {
		return error;
	}
	req->r_flags |= (flags & R_OPTMASK);
	req->r_flags |= R_ASYNC;
	if (si) {
		req->r_secinfo = *si;
	}
	if (cb) {
		req->r_callback = *cb;
	}
	error = nfs_request_add_header(req);
	if (!error) {
		req->r_flags |= R_WAITSENT;
		if (req->r_callback.rcb_func) {
			/* extra reference held for the async callback */
			nfs_request_ref(req, 0);
		}
		error = nfs_request_send(req, 1);
		lck_mtx_lock(&req->r_mtx);
		if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
			/* make sure to wait until this async I/O request gets sent */
			int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
			struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
			while (!(req->r_flags & R_SENT)) {
				nmp = req->r_nmp;
				if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
					lck_mtx_lock(&nmp->nm_lock);
					if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
						/*
						 * It's not going to get off the resend queue if we're in recovery.
						 * So, just take it off ourselves.  We could be holding mount state
						 * busy and thus holding up the start of recovery.
						 */
						TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
						req->r_rchain.tqe_next = NFSREQNOLIST;
						if (req->r_flags & R_RESENDQ) {
							req->r_flags &= ~R_RESENDQ;
						}
						lck_mtx_unlock(&nmp->nm_lock);
						req->r_flags |= R_SENDING;
						/* drop r_mtx across nfs_send(), then re-take it */
						lck_mtx_unlock(&req->r_mtx);
						error = nfs_send(req, 1);
						/* Remove the R_RESENDQ reference */
						nfs_request_rele(req);
						lck_mtx_lock(&req->r_mtx);
						if (error) {
							break;
						}
						continue;
					}
					lck_mtx_unlock(&nmp->nm_lock);
				}
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
					break;
				}
				msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
				slpflag = 0;
			}
		}
		sent = req->r_flags & R_SENT;
		lck_mtx_unlock(&req->r_mtx);
		if (error && req->r_callback.rcb_func && !sent) {
			/* never sent: drop the callback's extra reference */
			nfs_request_rele(req);
		}
	}
	FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
	if (error || req->r_callback.rcb_func) {
		nfs_request_rele(req);
	}

	return error;
}
/*
 * Wait for and finish an asynchronous NFS request.
 *
 * First waits until the request is off the mount's resend queue
 * (removing it directly if the mount is in recovery), then waits for
 * the reply and runs nfs_request_finish().  If finish flags R_RESTART:
 * for a callback-driven request this returns EINPROGRESS after either
 * scheduling a delayed resend or resending immediately; for a plain
 * async request the send/wait/finish loop is repeated inline.
 *
 * Consumes the caller's reference to the request in all cases except
 * the EINPROGRESS returns.  Returns 0 or an errno; *xidp and *status
 * are filled in as for nfs_request2().
 */
int
nfs_request_async_finish(
	struct nfsreq *req,
	struct nfsm_chain *nmrepp,
	u_int64_t *xidp,
	int *status)
{
	int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
	struct nfsmount *nmp;

	lck_mtx_lock(&req->r_mtx);
	if (!asyncio) {
		req->r_flags |= R_ASYNCWAIT;
	}
	while (req->r_flags & R_RESENDQ) {  /* wait until the request is off the resend queue */
		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

		if ((nmp = req->r_nmp)) {
			lck_mtx_lock(&nmp->nm_lock);
			if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
				/*
				 * It's not going to get off the resend queue if we're in recovery.
				 * So, just take it off ourselves.  We could be holding mount state
				 * busy and thus holding up the start of recovery.
				 */
				TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
				req->r_rchain.tqe_next = NFSREQNOLIST;
				if (req->r_flags & R_RESENDQ) {
					req->r_flags &= ~R_RESENDQ;
				}
				/* Remove the R_RESENDQ reference */
				assert(req->r_refs > 0);
				req->r_refs--;
				lck_mtx_unlock(&nmp->nm_lock);
				break;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			break;
		}
		msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
	}
	lck_mtx_unlock(&req->r_mtx);

	if (!error) {
		nfs_request_wait(req);
		error = nfs_request_finish(req, nmrepp, status);
	}

	while (!error && (req->r_flags & R_RESTART)) {
		if (asyncio) {
			assert(req->r_achain.tqe_next == NFSREQNOLIST);
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_IOD;
			if (req->r_resendtime) {  /* send later */
				nfs_asyncio_resend(req);
				lck_mtx_unlock(&req->r_mtx);
				return EINPROGRESS;
			}
			lck_mtx_unlock(&req->r_mtx);
		}
		req->r_error = 0;
		req->r_flags &= ~R_RESTART;
		if ((error = nfs_request_add_header(req))) {
			break;
		}
		if ((error = nfs_request_send(req, !asyncio))) {
			break;
		}
		if (asyncio) {
			/* callback path: completion will come via the callback */
			return EINPROGRESS;
		}
		nfs_request_wait(req);
		if ((error = nfs_request_finish(req, nmrepp, status))) {
			break;
		}
	}
	if (xidp) {
		*xidp = req->r_xid;
	}

	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
	nfs_request_rele(req);
	return error;
}
5110
2d21ac55
A
/*
 * Cancel a pending asynchronous NFS request.
 *
 * Logs the cancellation, then drops the caller's reference.
 * NOTE(review): order matters -- FSDBG reads request fields, and
 * nfs_request_rele() presumably frees the request when the last
 * reference is dropped; confirm against nfs_request_rele().
 */
void
nfs_request_async_cancel(struct nfsreq *req)
{
	FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
	nfs_request_rele(req);
}
5120
/*
 * Flag a request as being terminated.
 *
 * Sets R_SOFTTERM and records ETIMEDOUT as the request's error.  If the
 * request held a congestion-window slot, the slot is returned and the
 * next waiter on the cwnd queue is woken (same accounting as the
 * R_CWND teardown in nfs_request_finish()).
 */
void
nfs_softterm(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	req->r_flags |= R_SOFTTERM;
	req->r_error = ETIMEDOUT;
	if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
		/* no cwnd slot held, or the mount is gone -- nothing to release */
		return;
	}
	/* update congestion window */
	req->r_flags &= ~R_CWND;
	lck_mtx_lock(&nmp->nm_lock);
	FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
	nmp->nm_sent -= NFS_CWNDSCALE;
	if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
		/* congestion window is open, poke the cwnd queue */
		struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
		TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
		req2->r_cchain.tqe_next = NFSREQNOLIST;
		wakeup(req2);
	}
	lck_mtx_unlock(&nmp->nm_lock);
}
55e303ae 5147
2d21ac55
A
/*
 * Ensure req isn't in use by the timer, then dequeue it.
 *
 * Under nfs_request_mutex, waits for any RL_BUSY holder (see
 * nfs_reqbusy()/nfs_reqnext(), used by the request timer) to release
 * the request, then unlinks it from the global nfs_reqq list.
 */
void
nfs_reqdequeue(struct nfsreq *req)
{
	lck_mtx_lock(nfs_request_mutex);
	while (req->r_lflags & RL_BUSY) {
		/* flag that we're waiting; the holder wakes us via r_lflags */
		req->r_lflags |= RL_WAITING;
		msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
	}
	if (req->r_lflags & RL_QUEUED) {
		TAILQ_REMOVE(&nfs_reqq, req, r_chain);
		req->r_lflags &= ~RL_QUEUED;
	}
	lck_mtx_unlock(nfs_request_mutex);
}
5165
5166/*
5167 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5168 * free()'d out from under it.
5169 */
b0d623f7 5170void
2d21ac55 5171nfs_reqbusy(struct nfsreq *req)
55e303ae 5172{
0a7de745 5173 if (req->r_lflags & RL_BUSY) {
2d21ac55 5174 panic("req locked");
0a7de745 5175 }
2d21ac55 5176 req->r_lflags |= RL_BUSY;
55e303ae
A
5177}
5178
5179/*
5180 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5181 */
b0d623f7 5182struct nfsreq *
2d21ac55 5183nfs_reqnext(struct nfsreq *req)
55e303ae 5184{
2d21ac55 5185 struct nfsreq * nextreq;
55e303ae 5186
0a7de745
A
5187 if (req == NULL) {
5188 return NULL;
5189 }
55e303ae
A
5190 /*
5191 * We need to get and busy the next req before signalling the
5192 * current one, otherwise wakeup() may block us and we'll race to
5193 * grab the next req.
5194 */
2d21ac55 5195 nextreq = TAILQ_NEXT(req, r_chain);
0a7de745 5196 if (nextreq != NULL) {
2d21ac55 5197 nfs_reqbusy(nextreq);
0a7de745 5198 }
55e303ae 5199 /* unbusy and signal. */
2d21ac55
A
5200 req->r_lflags &= ~RL_BUSY;
5201 if (req->r_lflags & RL_WAITING) {
5202 req->r_lflags &= ~RL_WAITING;
5203 wakeup(&req->r_lflags);
55e303ae 5204 }
0a7de745 5205 return nextreq;
55e303ae
A
5206}
5207
1c79356b 5208/*
2d21ac55
A
5209 * NFS request queue timer routine
5210 *
5211 * Scan the NFS request queue for any requests that have timed out.
5212 *
5213 * Alert the system of unresponsive servers.
5214 * Mark expired requests on soft mounts as terminated.
5215 * For UDP, mark/signal requests for retransmission.
1c79356b
A
5216 */
5217void
2d21ac55 5218nfs_request_timer(__unused void *param0, __unused void *param1)
1c79356b 5219{
2d21ac55 5220 struct nfsreq *req;
91447636 5221 struct nfsmount *nmp;
2d21ac55 5222 int timeo, maxtime, finish_asyncio, error;
55e303ae 5223 struct timeval now;
2d21ac55 5224 TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
d9a64523 5225 TAILQ_INIT(&nfs_mount_poke_queue);
2d21ac55 5226
fe8ab488 5227restart:
2d21ac55
A
5228 lck_mtx_lock(nfs_request_mutex);
5229 req = TAILQ_FIRST(&nfs_reqq);
0a7de745 5230 if (req == NULL) { /* no requests - turn timer off */
2d21ac55
A
5231 nfs_request_timer_on = 0;
5232 lck_mtx_unlock(nfs_request_mutex);
5233 return;
5234 }
5235
5236 nfs_reqbusy(req);
1c79356b 5237
55e303ae 5238 microuptime(&now);
0a7de745 5239 for (; req != NULL; req = nfs_reqnext(req)) {
2d21ac55 5240 nmp = req->r_nmp;
fe8ab488
A
5241 if (nmp == NULL) {
5242 NFS_SOCK_DBG("Found a request with out a mount!\n");
1c79356b 5243 continue;
fe8ab488 5244 }
0a7de745 5245 if (req->r_error || req->r_nmrep.nmc_mhead) {
1c79356b 5246 continue;
0a7de745 5247 }
2d21ac55
A
5248 if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
5249 if (req->r_callback.rcb_func != NULL) {
5250 /* async I/O RPC needs to be finished */
5251 lck_mtx_lock(&req->r_mtx);
5252 req->r_error = error;
5253 finish_asyncio = !(req->r_flags & R_WAITSENT);
5254 wakeup(req);
5255 lck_mtx_unlock(&req->r_mtx);
0a7de745 5256 if (finish_asyncio) {
2d21ac55 5257 nfs_asyncio_finish(req);
0a7de745 5258 }
2d21ac55
A
5259 }
5260 continue;
5261 }
5262
5263 lck_mtx_lock(&req->r_mtx);
5264
5265 if (nmp->nm_tprintf_initial_delay &&
5266 ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
5267 ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
5268 req->r_lastmsg = now.tv_sec;
5269 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
0a7de745 5270 "not responding", 1);
2d21ac55
A
5271 req->r_flags |= R_TPRINTFMSG;
5272 lck_mtx_lock(&nmp->nm_lock);
4a249263 5273 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
2d21ac55 5274 lck_mtx_unlock(&nmp->nm_lock);
4a249263
A
5275 /* we're not yet completely mounted and */
5276 /* we can't complete an RPC, so we fail */
316670eb 5277 OSAddAtomic64(1, &nfsstats.rpctimeouts);
2d21ac55
A
5278 nfs_softterm(req);
5279 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5280 wakeup(req);
5281 lck_mtx_unlock(&req->r_mtx);
0a7de745 5282 if (finish_asyncio) {
2d21ac55 5283 nfs_asyncio_finish(req);
0a7de745 5284 }
4a249263
A
5285 continue;
5286 }
2d21ac55 5287 lck_mtx_unlock(&nmp->nm_lock);
1c79356b 5288 }
2d21ac55 5289
1c79356b 5290 /*
2d21ac55
A
5291 * Put a reasonable limit on the maximum timeout,
5292 * and reduce that limit when soft mounts get timeouts or are in reconnect.
1c79356b 5293 */
0a7de745 5294 if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
2d21ac55 5295 maxtime = NFS_MAXTIMEO;
0a7de745
A
5296 } else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
5297 ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
5298 maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
5299 } else {
5300 maxtime = NFS_MINTIMEO / 4;
5301 }
1c79356b
A
5302
5303 /*
2d21ac55 5304 * Check for request timeout.
1c79356b 5305 */
2d21ac55
A
5306 if (req->r_rtt >= 0) {
5307 req->r_rtt++;
5308 lck_mtx_lock(&nmp->nm_lock);
5309 if (req->r_flags & R_RESENDERR) {
5310 /* with resend errors, retry every few seconds */
0a7de745 5311 timeo = 4 * hz;
1c79356b 5312 } else {
0a7de745 5313 if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
2d21ac55 5314 timeo = NFS_MINIDEMTIMEO; // gss context setup
0a7de745 5315 } else if (NMFLAG(nmp, DUMBTIMER)) {
2d21ac55 5316 timeo = nmp->nm_timeo;
0a7de745 5317 } else {
2d21ac55 5318 timeo = NFS_RTO(nmp, proct[req->r_procnum]);
0a7de745 5319 }
1c79356b 5320
2d21ac55 5321 /* ensure 62.5 ms floor */
0a7de745 5322 while (16 * timeo < hz) {
2d21ac55 5323 timeo *= 2;
0a7de745
A
5324 }
5325 if (nmp->nm_timeouts > 0) {
2d21ac55 5326 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
0a7de745 5327 }
2d21ac55
A
5328 }
5329 /* limit timeout to max */
0a7de745 5330 if (timeo > maxtime) {
2d21ac55 5331 timeo = maxtime;
0a7de745 5332 }
2d21ac55 5333 if (req->r_rtt <= timeo) {
fe8ab488 5334 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
2d21ac55
A
5335 lck_mtx_unlock(&nmp->nm_lock);
5336 lck_mtx_unlock(&req->r_mtx);
5337 continue;
91447636 5338 }
2d21ac55 5339 /* The request has timed out */
39236c6e 5340 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
0a7de745
A
5341 req->r_procnum, proct[req->r_procnum],
5342 req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
5343 (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
5344 if (nmp->nm_timeouts < 8) {
2d21ac55 5345 nmp->nm_timeouts++;
0a7de745 5346 }
fe8ab488
A
5347 if (nfs_mount_check_dead_timeout(nmp)) {
5348 /* Unbusy this request */
5349 req->r_lflags &= ~RL_BUSY;
5350 if (req->r_lflags & RL_WAITING) {
5351 req->r_lflags &= ~RL_WAITING;
5352 wakeup(&req->r_lflags);
5353 }
5354 lck_mtx_unlock(&req->r_mtx);
5355
0a7de745 5356 /* No need to poke this mount */
fe8ab488
A
5357 if (nmp->nm_sockflags & NMSOCK_POKE) {
5358 nmp->nm_sockflags &= ~NMSOCK_POKE;
5359 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5360 }
5361 /* Release our lock state, so we can become a zombie */
5362 lck_mtx_unlock(nfs_request_mutex);
5363
5364 /*
5365 * Note nfs_mount_make zombie(nmp) must be
5366 * called with nm_lock held. After doing some
5367 * work we release nm_lock in
5368 * nfs_make_mount_zombie with out acquiring any
5369 * other locks. (Later, in nfs_mount_zombie we
5370 * will acquire nfs_request_mutex, r_mtx,
5371 * nm_lock in that order). So we should not be
5372 * introducing deadlock here. We take a reference
5373 * on the mount so that its still there when we
5374 * release the lock.
5375 */
5376 nmp->nm_ref++;
5377 nfs_mount_make_zombie(nmp);
5378 lck_mtx_unlock(&nmp->nm_lock);
5379 nfs_mount_rele(nmp);
5380
5381 /*
5382 * All the request for this mount have now been
5383 * removed from the request queue. Restart to
5384 * process the remaining mounts
5385 */
5386 goto restart;
5387 }
0a7de745 5388
2d21ac55
A
5389 /* if it's been a few seconds, try poking the socket */
5390 if ((nmp->nm_sotype == SOCK_STREAM) &&
5391 ((now.tv_sec - req->r_start) >= 3) &&
0a7de745 5392 !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
6d2010ae 5393 (nmp->nm_sockflags & NMSOCK_READY)) {
2d21ac55 5394 nmp->nm_sockflags |= NMSOCK_POKE;
39037602
A
5395 /*
5396 * We take a ref on the mount so that we know the mount will still be there
5397 * when we process the nfs_mount_poke_queue. An unmount request will block
5398 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5399 * the reference after calling nfs_sock_poke below;
5400 */
5401 nmp->nm_ref++;
2d21ac55
A
5402 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
5403 }
5404 lck_mtx_unlock(&nmp->nm_lock);
5405 }
1c79356b 5406
b0d623f7 5407 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
0a7de745 5408 if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
2d21ac55 5409 ((req->r_rexmit >= req->r_retry) || /* too many */
0a7de745 5410 ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
316670eb 5411 OSAddAtomic64(1, &nfsstats.rpctimeouts);
2d21ac55
A
5412 lck_mtx_lock(&nmp->nm_lock);
5413 if (!(nmp->nm_state & NFSSTA_TIMEO)) {
5414 lck_mtx_unlock(&nmp->nm_lock);
5415 /* make sure we note the unresponsive server */
5416 /* (maxtime may be less than tprintf delay) */
5417 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
0a7de745 5418 "not responding", 1);
2d21ac55
A
5419 req->r_lastmsg = now.tv_sec;
5420 req->r_flags |= R_TPRINTFMSG;
5421 } else {
5422 lck_mtx_unlock(&nmp->nm_lock);
5423 }
6d2010ae
A
5424 if (req->r_flags & R_NOINTR) {
5425 /* don't terminate nointr requests on timeout */
5426 lck_mtx_unlock(&req->r_mtx);
5427 continue;
5428 }
39236c6e 5429 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
0a7de745
A
5430 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
5431 now.tv_sec - req->r_start);
2d21ac55
A
5432 nfs_softterm(req);
5433 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5434 wakeup(req);
5435 lck_mtx_unlock(&req->r_mtx);
0a7de745 5436 if (finish_asyncio) {
2d21ac55 5437 nfs_asyncio_finish(req);
0a7de745 5438 }
2d21ac55
A
5439 continue;
5440 }
1c79356b 5441
2d21ac55
A
5442 /* for TCP, only resend if explicitly requested */
5443 if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
0a7de745 5444 if (++req->r_rexmit > NFS_MAXREXMIT) {
2d21ac55 5445 req->r_rexmit = NFS_MAXREXMIT;
0a7de745 5446 }
2d21ac55
A
5447 req->r_rtt = 0;
5448 lck_mtx_unlock(&req->r_mtx);
5449 continue;
1c79356b 5450 }
483a1d10 5451
483a1d10 5452 /*
2d21ac55
A
5453 * The request needs to be (re)sent. Kick the requester to resend it.
5454 * (unless it's already marked as needing a resend)
483a1d10 5455 */
2d21ac55
A
5456 if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
5457 lck_mtx_unlock(&req->r_mtx);
5458 continue;
5459 }
39236c6e 5460 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
0a7de745 5461 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
2d21ac55
A
5462 req->r_flags |= R_MUSTRESEND;
5463 req->r_rtt = -1;
5464 wakeup(req);
0a7de745 5465 if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2d21ac55 5466 nfs_asyncio_resend(req);
0a7de745 5467 }
2d21ac55
A
5468 lck_mtx_unlock(&req->r_mtx);
5469 }
5470
5471 lck_mtx_unlock(nfs_request_mutex);
5472
5473 /* poke any sockets */
5474 while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
5475 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5476 nfs_sock_poke(nmp);
39037602 5477 nfs_mount_rele(nmp);
2d21ac55
A
5478 }
5479
5480 nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
1c79356b
A
5481}
5482
2d21ac55
A
5483/*
5484 * check a thread's proc for the "noremotehang" flag.
5485 */
5486int
5487nfs_noremotehang(thread_t thd)
5488{
5489 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
0a7de745 5490 return p && proc_noremotehang(p);
2d21ac55 5491}
1c79356b
A
5492
5493/*
5494 * Test for a termination condition pending on the process.
55e303ae 5495 * This is used to determine if we need to bail on a mount.
2d21ac55 5496 * ETIMEDOUT is returned if there has been a soft timeout.
55e303ae
A
5497 * EINTR is returned if there is a signal pending that is not being ignored
5498 * and the mount is interruptable, or if we are a thread that is in the process
5499 * of cancellation (also SIGKILL posted).
1c79356b 5500 */
0a7de745 5501extern int sigprop[NSIG + 1];
1c79356b 5502int
2d21ac55 5503nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
1c79356b 5504{
0b4c1975 5505 proc_t p;
2d21ac55 5506 int error = 0;
55e303ae 5507
0a7de745
A
5508 if (!nmp) {
5509 return ENXIO;
5510 }
2d21ac55 5511
0a7de745
A
5512 if (req && (req->r_flags & R_SOFTTERM)) {
5513 return ETIMEDOUT; /* request has been terminated. */
5514 }
5515 if (req && (req->r_flags & R_NOINTR)) {
6d2010ae 5516 thd = NULL; /* don't check for signal on R_NOINTR */
0a7de745
A
5517 }
5518 if (!nmplocked) {
2d21ac55 5519 lck_mtx_lock(&nmp->nm_lock);
0a7de745 5520 }
6d2010ae
A
5521 if (nmp->nm_state & NFSSTA_FORCE) {
5522 /* If a force unmount is in progress then fail. */
2d21ac55 5523 error = EIO;
fe8ab488 5524 } else if (vfs_isforce(nmp->nm_mountp)) {
55e303ae 5525 /* Someone is unmounting us, go soft and mark it. */
6d2010ae 5526 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
2d21ac55 5527 nmp->nm_state |= NFSSTA_FORCE;
55e303ae 5528 }
2d21ac55 5529
b0d623f7 5530 /* Check if the mount is marked dead. */
0a7de745 5531 if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
b0d623f7 5532 error = ENXIO;
0a7de745 5533 }
b0d623f7 5534
2d21ac55
A
5535 /*
5536 * If the mount is hung and we've requested not to hang
5537 * on remote filesystems, then bail now.
5538 */
39236c6e 5539 if (current_proc() != kernproc &&
0a7de745 5540 !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
2d21ac55 5541 error = EIO;
0a7de745 5542 }
2d21ac55 5543
0a7de745 5544 if (!nmplocked) {
2d21ac55 5545 lck_mtx_unlock(&nmp->nm_lock);
0a7de745
A
5546 }
5547 if (error) {
5548 return error;
5549 }
2d21ac55
A
5550
5551 /* may not have a thread for async I/O */
0a7de745
A
5552 if (thd == NULL || current_proc() == kernproc) {
5553 return 0;
5554 }
1c79356b 5555
6d2010ae
A
5556 /*
5557 * Check if the process is aborted, but don't interrupt if we
5558 * were killed by a signal and this is the exiting thread which
5559 * is attempting to dump core.
5560 */
5561 if (((p = current_proc()) != kernproc) && current_thread_aborted() &&
5562 (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) ||
0a7de745
A
5563 (p->p_sigacts == NULL) ||
5564 (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) ||
5565 !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) {
5566 return EINTR;
5567 }
91447636 5568
2d21ac55 5569 /* mask off thread and process blocked signals. */
6d2010ae 5570 if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
0a7de745
A
5571 proc_pendingsignals(p, NFSINT_SIGMASK)) {
5572 return EINTR;
5573 }
5574 return 0;
1c79356b
A
5575}
5576
5577/*
5578 * Lock a socket against others.
5579 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5580 * and also to avoid race conditions between the processes with nfs requests
5581 * in progress when a reconnect is necessary.
5582 */
5583int
2d21ac55 5584nfs_sndlock(struct nfsreq *req)
1c79356b 5585{
2d21ac55 5586 struct nfsmount *nmp = req->r_nmp;
91447636 5587 int *statep;
2d21ac55 5588 int error = 0, slpflag = 0;
cb323159 5589 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
1c79356b 5590
0a7de745
A
5591 if (nfs_mount_gone(nmp)) {
5592 return ENXIO;
5593 }
55e303ae 5594
2d21ac55
A
5595 lck_mtx_lock(&nmp->nm_lock);
5596 statep = &nmp->nm_state;
5597
0a7de745 5598 if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
55e303ae 5599 slpflag = PCATCH;
0a7de745 5600 }
36401178 5601 while (*statep & NFSSTA_SNDLOCK) {
0a7de745 5602 if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
2d21ac55 5603 break;
0a7de745 5604 }
55e303ae 5605 *statep |= NFSSTA_WANTSND;
0a7de745 5606 if (nfs_noremotehang(req->r_thread)) {
2d21ac55 5607 ts.tv_sec = 1;
0a7de745 5608 }
36401178 5609 msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
1c79356b
A
5610 if (slpflag == PCATCH) {
5611 slpflag = 0;
2d21ac55 5612 ts.tv_sec = 2;
1c79356b
A
5613 }
5614 }
0a7de745 5615 if (!error) {
2d21ac55 5616 *statep |= NFSSTA_SNDLOCK;
0a7de745 5617 }
2d21ac55 5618 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 5619 return error;
1c79356b
A
5620}
5621
5622/*
5623 * Unlock the stream socket for others.
5624 */
5625void
2d21ac55 5626nfs_sndunlock(struct nfsreq *req)
1c79356b 5627{
2d21ac55
A
5628 struct nfsmount *nmp = req->r_nmp;
5629 int *statep, wake = 0;
1c79356b 5630
0a7de745 5631 if (!nmp) {
55e303ae 5632 return;
0a7de745 5633 }
2d21ac55
A
5634 lck_mtx_lock(&nmp->nm_lock);
5635 statep = &nmp->nm_state;
0a7de745 5636 if ((*statep & NFSSTA_SNDLOCK) == 0) {
1c79356b 5637 panic("nfs sndunlock");
0a7de745
A
5638 }
5639 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
55e303ae
A
5640 if (*statep & NFSSTA_WANTSND) {
5641 *statep &= ~NFSSTA_WANTSND;
2d21ac55 5642 wake = 1;
1c79356b 5643 }
2d21ac55 5644 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 5645 if (wake) {
2d21ac55 5646 wakeup(statep);
0a7de745 5647 }
1c79356b
A
5648}
5649
b0d623f7
A
5650int
5651nfs_aux_request(
5652 struct nfsmount *nmp,
5653 thread_t thd,
6d2010ae
A
5654 struct sockaddr *saddr,
5655 socket_t so,
5656 int sotype,
b0d623f7
A
5657 mbuf_t mreq,
5658 uint32_t xid,
5659 int bindresv,
5660 int timeo,
5661 struct nfsm_chain *nmrep)
5662{
6d2010ae
A
5663 int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
5664 socket_t newso = NULL;
5665 struct sockaddr_storage ss;
cb323159 5666 struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
b0d623f7
A
5667 mbuf_t m, mrep = NULL;
5668 struct msghdr msg;
5669 uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
5670 uint32_t verf_type, verf_len, accepted_status;
6d2010ae
A
5671 size_t readlen, sentlen;
5672 struct nfs_rpc_record_state nrrs;
b0d623f7 5673
6d2010ae
A
5674 if (!so) {
5675 /* create socket and set options */
cb323159
A
5676 if (saddr->sa_family == AF_LOCAL) {
5677 soproto = 0;
5678 } else {
5679 soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
5680 }
0a7de745 5681 if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
6d2010ae 5682 goto nfsmout;
0a7de745 5683 }
6d2010ae 5684
cb323159 5685 if (bindresv && saddr->sa_family != AF_LOCAL) {
6d2010ae
A
5686 int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
5687 int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
5688 int portrange = IP_PORTRANGE_LOW;
5689 error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
5690 nfsmout_if(error);
5691 ss.ss_len = saddr->sa_len;
5692 ss.ss_family = saddr->sa_family;
5693 if (ss.ss_family == AF_INET) {
5694 ((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
5695 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
5696 } else if (ss.ss_family == AF_INET6) {
5697 ((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
5698 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
5699 } else {
5700 error = EINVAL;
5701 }
0a7de745 5702 if (!error) {
6d2010ae 5703 error = sock_bind(newso, (struct sockaddr *)&ss);
0a7de745 5704 }
6d2010ae
A
5705 nfsmout_if(error);
5706 }
5707
5708 if (sotype == SOCK_STREAM) {
0a7de745 5709# define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
fe8ab488 5710 int count = 0;
0a7de745 5711
fe8ab488 5712 error = sock_connect(newso, saddr, MSG_DONTWAIT);
0a7de745 5713 if (error == EINPROGRESS) {
fe8ab488 5714 error = 0;
0a7de745 5715 }
fe8ab488
A
5716 nfsmout_if(error);
5717
5718 while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
5719 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5720 if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
5721 error = ETIMEDOUT;
5722 break;
5723 }
5724 }
6d2010ae
A
5725 nfsmout_if(error);
5726 }
5727 if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
5728 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
0a7de745 5729 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
6d2010ae 5730 goto nfsmout;
0a7de745 5731 }
6d2010ae
A
5732 so = newso;
5733 } else {
5734 /* make sure socket is using a one second timeout in this function */
5735 optlen = sizeof(orig_rcvto);
5736 error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
5737 if (!error) {
5738 optlen = sizeof(orig_sndto);
5739 error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
5740 }
5741 if (!error) {
5742 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
5743 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
5744 restoreto = 1;
5745 }
5746 }
5747
5748 if (sotype == SOCK_STREAM) {
5749 sendat = 0; /* we only resend the request for UDP */
5750 nfs_rpc_record_state_init(&nrrs);
b0d623f7
A
5751 }
5752
0a7de745
A
5753 for (try = 0; try < timeo; try++) {
5754 if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
b0d623f7 5755 break;
0a7de745 5756 }
b0d623f7 5757 if (!try || (try == sendat)) {
6d2010ae 5758 /* send the request (resending periodically for UDP) */
0a7de745 5759 if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
b0d623f7 5760 goto nfsmout;
0a7de745 5761 }
b0d623f7 5762 bzero(&msg, sizeof(msg));
6d2010ae
A
5763 if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
5764 msg.msg_name = saddr;
5765 msg.msg_namelen = saddr->sa_len;
5766 }
0a7de745 5767 if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
b0d623f7 5768 goto nfsmout;
0a7de745 5769 }
b0d623f7 5770 sendat *= 2;
0a7de745 5771 if (sendat > 30) {
b0d623f7 5772 sendat = 30;
0a7de745 5773 }
b0d623f7
A
5774 }
5775 /* wait for the response */
6d2010ae
A
5776 if (sotype == SOCK_STREAM) {
5777 /* try to read (more of) record */
5778 error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
5779 /* if we don't have the whole record yet, we'll keep trying */
5780 } else {
0a7de745 5781 readlen = 1 << 18;
6d2010ae
A
5782 bzero(&msg, sizeof(msg));
5783 error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
5784 }
0a7de745 5785 if (error == EWOULDBLOCK) {
b0d623f7 5786 continue;
0a7de745 5787 }
b0d623f7
A
5788 nfsmout_if(error);
5789 /* parse the response */
5790 nfsm_chain_dissect_init(error, nmrep, mrep);
5791 nfsm_chain_get_32(error, nmrep, rxid);
5792 nfsm_chain_get_32(error, nmrep, reply);
5793 nfsmout_if(error);
0a7de745 5794 if ((rxid != xid) || (reply != RPC_REPLY)) {
b0d623f7 5795 error = EBADRPC;
0a7de745 5796 }
b0d623f7
A
5797 nfsm_chain_get_32(error, nmrep, reply_status);
5798 nfsmout_if(error);
5799 if (reply_status == RPC_MSGDENIED) {
5800 nfsm_chain_get_32(error, nmrep, rejected_status);
5801 nfsmout_if(error);
6d2010ae 5802 error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
b0d623f7
A
5803 goto nfsmout;
5804 }
5805 nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
5806 nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
5807 nfsmout_if(error);
0a7de745 5808 if (verf_len) {
b0d623f7 5809 nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
0a7de745 5810 }
b0d623f7 5811 nfsm_chain_get_32(error, nmrep, accepted_status);
6d2010ae
A
5812 nfsmout_if(error);
5813 switch (accepted_status) {
5814 case RPC_SUCCESS:
5815 error = 0;
5816 break;
5817 case RPC_PROGUNAVAIL:
5818 error = EPROGUNAVAIL;
5819 break;
5820 case RPC_PROGMISMATCH:
5821 error = EPROGMISMATCH;
5822 break;
5823 case RPC_PROCUNAVAIL:
5824 error = EPROCUNAVAIL;
5825 break;
5826 case RPC_GARBAGE:
5827 error = EBADRPC;
5828 break;
5829 case RPC_SYSTEM_ERR:
5830 default:
5831 error = EIO;
5832 break;
5833 }
b0d623f7
A
5834 break;
5835 }
5836nfsmout:
6d2010ae
A
5837 if (restoreto) {
5838 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
5839 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
5840 }
5841 if (newso) {
5842 sock_shutdown(newso, SHUT_RDWR);
5843 sock_close(newso);
b0d623f7
A
5844 }
5845 mbuf_freem(mreq);
0a7de745 5846 return error;
b0d623f7
A
5847}
5848
6d2010ae
A
/*
 * Look up the server port / universal address for (protocol, vers) using
 * the portmapper (IPv4) or rpcbind (IPv6 and AF_LOCAL), rewriting 'sa'
 * in place with the result.  If the server doesn't support rpcbind v4
 * GETVERSADDR, falls back to rpcbind v3 GETADDR.
 */
int
nfs_portmap_lookup(
	struct nfsmount *nmp,
	vfs_context_t ctx,
	struct sockaddr *sa,
	socket_t so,
	uint32_t protocol,
	uint32_t vers,
	uint32_t stype,
	int timeo)
{
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	struct sockaddr_storage ss;
	struct sockaddr *saddr = (struct sockaddr*)&ss;
	/* well-known local-domain rpcbind endpoints (connection-oriented / -less) */
	static struct sockaddr_un rpcbind_cots = {
		sizeof(struct sockaddr_un),
		AF_LOCAL,
		RPCB_TICOTSORD_PATH
	};
	static struct sockaddr_un rpcbind_clts = {
		sizeof(struct sockaddr_un),
		AF_LOCAL,
		RPCB_TICLTS_PATH
	};
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mreq;
	int error = 0, ip, pmprog, pmvers, pmproc;
	uint32_t ualen = 0;
	uint32_t port;
	uint64_t xid = 0;
	char uaddr[MAX_IPv6_STR_LEN + 16];

	/* work on a private copy of the server address */
	bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
	if (saddr->sa_family == AF_INET) {
		/* IPv4: classic portmap GETPORT */
		ip = 4;
		pmprog = PMAPPROG;
		pmvers = PMAPVERS;
		pmproc = PMAPPROC_GETPORT;
	} else if (saddr->sa_family == AF_INET6) {
		/* IPv6: rpcbind v4 GETVERSADDR */
		ip = 6;
		pmprog = RPCBPROG;
		pmvers = RPCBVERS4;
		pmproc = RPCBPROC_GETVERSADDR;
	} else if (saddr->sa_family == AF_LOCAL) {
		/* local domain: rpcbind v4 over the well-known socket paths */
		ip = 0;
		pmprog = RPCBPROG;
		pmvers = RPCBVERS4;
		pmproc = RPCBPROC_GETVERSADDR;
		NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
		saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
	} else {
		return EINVAL;
	}
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

tryagain:
	/* send portmapper request to get port/uaddr */
	if (ip == 4) {
		((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
	} else if (ip == 6) {
		((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
	}
	nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, protocol);
	nfsm_chain_add_32(error, &nmreq, vers);
	if (ip == 4) {
		/* portmap v2 args: prog, vers, proto, port(0) */
		nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
		nfsm_chain_add_32(error, &nmreq, 0);
	} else {
		/* rpcbind args: prog, vers, netid string, uaddr, owner */
		if (stype == SOCK_STREAM) {
			if (ip == 6) {
				nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
			} else {
				nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
			}
		} else {
			if (ip == 6) {
				nfsm_chain_add_string(error, &nmreq, "udp6", 4);
			} else {
				nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
			}
		}
		nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
		nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
	    RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
	nfsmout_if(error);
	/* nfsm_rpchead2 took ownership of the request mbufs */
	nmreq.nmc_mhead = NULL;

	NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq);
	error = nfs_aux_request(nmp, thd, saddr, so,
	    stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
	NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
	NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers,
	    (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
	    (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket");

	/* grab port from portmap response */
	if (ip == 4) {
		nfsm_chain_get_32(error, &nmrep, port);
		if (!error) {
			((struct sockaddr_in*)sa)->sin_port = htons(port);
		}
	} else {
		/* get uaddr string and convert to sockaddr */
		nfsm_chain_get_32(error, &nmrep, ualen);
		if (!error) {
			if (ualen > (sizeof(uaddr) - 1)) {
				error = EIO;
			}
			if (ualen < 1) {
				/* program is not available, just return a zero port */
				/*
				 * NOTE(review): in the AF_LOCAL case saddr was redirected
				 * above to one of the static rpcbind sockaddrs, so this
				 * bcopy (and the sun_path clear below) writes into that
				 * static storage — confirm this is intended.
				 */
				bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
				if (ip == 6) {
					((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
				} else {
					((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
				}
				NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers);
			} else {
				nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
				NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
				if (!error) {
					uaddr[ualen] = '\0';
					if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
						error = EIO;
					}
				}
			}
		}
		if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
			/* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
			if (pmvers == RPCBVERS4) {
				/* fall back to v3 and GETADDR */
				pmvers = RPCBVERS3;
				pmproc = RPCBPROC_GETADDR;
				nfsm_chain_cleanup(&nmreq);
				nfsm_chain_cleanup(&nmrep);
				bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
				xid = 0;
				error = 0;
				goto tryagain;
			}
		}
		/* on success, copy the resolved address back out to the caller */
		if (!error) {
			bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
		}
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	NFS_SOCK_DBG("Returned %d\n", error);

	return error;
}
6009
b0d623f7
A
6010int
6011nfs_msg(thread_t thd,
0a7de745
A
6012 const char *server,
6013 const char *msg,
6014 int error)
b0d623f7
A
6015{
6016 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6017 tpr_t tpr;
6018
0a7de745 6019 if (p) {
b0d623f7 6020 tpr = tprintf_open(p);
0a7de745 6021 } else {
b0d623f7 6022 tpr = NULL;
0a7de745
A
6023 }
6024 if (error) {
b0d623f7 6025 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
0a7de745 6026 } else {
b0d623f7 6027 tprintf(tpr, "nfs server %s: %s\n", server, msg);
0a7de745 6028 }
b0d623f7 6029 tprintf_close(tpr);
0a7de745 6030 return 0;
b0d623f7
A
6031}
6032
0a7de745
A
6033#define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
6034#define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */
6035#define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
6036#define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
6037#define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
316670eb
A
6038
6039uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
6040int32_t nfs_is_mobile;
6041
0a7de745
A
6042#define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
6043#define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/
316670eb
A
6044
6045/*
6046 * Could this mount be squished?
6047 */
6048int
6049nfs_can_squish(struct nfsmount *nmp)
6050{
6051 uint64_t flags = vfs_flags(nmp->nm_mountp);
6052 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT));
6053
0a7de745
A
6054 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6055 return 0;
6056 }
316670eb 6057
0a7de745
A
6058 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6059 return 0;
6060 }
316670eb 6061
0a7de745 6062 return 1;
316670eb
A
6063}
6064
6065/*
6066 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6067 * the mount may become "not responding". It's desirable to be able
6068 * to unmount these dead mounts, but only if there is no risk of
6069 * losing data or crashing applications. A "squishy" NFS mount is one
6070 * that can be force unmounted with little risk of harm.
6071 *
6072 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6073 * in a squishy state iff it is allowed to be squishy and there are no
6074 * dirty pages and there are no mmapped files and there are no files
 * open for write. Whether mounts are allowed to be squishy is controlled by
6076 * the settings of the nfs_squishy_flags and its mobility state. These
6077 * flags can be set by sysctls.
6078 *
6079 * If nfs_is_squishy determines that we are in a squishy state we will
6080 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6081 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6082 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6083 * update the current dead timeout with the mount's nm_deadtimeout
6084 * value set at mount time.
6085 *
6086 * Assumes that nm_lock is held.
6087 *
6088 * Note this routine is racey, but its effects on setting the
6089 * dead timeout only have effects when we're in trouble and are likely
6090 * to stay that way. Since by default its only for automounted
6091 * volumes on mobile machines; this is a reasonable trade off between
6092 * data integrity and user experience. It can be disabled or set via
6093 * nfs.conf file.
6094 */
6095
/*
 * Decide whether the mount is currently in a squishy state and update
 * nm_curdeadtimeout accordingly (see the block comment above for the
 * full policy description).  Assumes nm_lock is held.
 */
int
nfs_is_squishy(struct nfsmount *nmp)
{
	mount_t mp = nmp->nm_mountp;
	int squishy = 0;
	int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;

	NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
	    vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);

	if (!nfs_can_squish(nmp)) {
		goto out;
	}

	/* use the larger of the squishy base timeout and 1/8th of the mount's dead timeout */
	timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
	NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);

	/* squishy only if no files open for write and nothing mmapped */
	if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
		uint64_t flags = mp ? vfs_flags(mp) : 0;
		squishy = 1;

		/*
		 * Walk the nfs nodes and check for dirty buffers if we're not
		 * RDONLY and we've not already been declared as squishy since
		 * this can be a bit expensive.
		 */
		if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
			squishy = !nfs_mount_is_dirty(mp);
		}
	}

out:
	/* record the decision in the mount state */
	if (squishy) {
		nmp->nm_state |= NFSSTA_SQUISHY;
	} else {
		nmp->nm_state &= ~NFSSTA_SQUISHY;
	}

	/* squishy mounts get the (shorter) squishy timeout; others revert to nm_deadtimeout */
	nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;

	NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);

	return squishy;
}
6140
6141/*
6142 * On a send operation, if we can't reach the server and we've got only one server to talk to
6143 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
6144 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6145 */
fe8ab488
A
6146int
6147nfs_is_dead(int error, struct nfsmount *nmp)
316670eb 6148{
fe8ab488
A
6149 fsid_t fsid;
6150
6151 lck_mtx_lock(&nmp->nm_lock);
6152 if (nmp->nm_state & NFSSTA_DEAD) {
6153 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 6154 return 1;
fe8ab488 6155 }
316670eb 6156
39236c6e 6157 if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
fe8ab488
A
6158 !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
6159 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 6160 return 0;
fe8ab488 6161 }
39236c6e 6162
316670eb
A
6163 if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
6164 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
fe8ab488
A
6165 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
6166 lck_mtx_unlock(&nmp->nm_lock);
6167 nfs_mount_zombie(nmp, NFSSTA_DEAD);
6168 vfs_event_signal(&fsid, VQ_DEAD, 0);
0a7de745 6169 return 1;
316670eb 6170 }
fe8ab488 6171 lck_mtx_unlock(&nmp->nm_lock);
0a7de745 6172 return 0;
316670eb
A
6173}
6174
fe8ab488
A
6175/*
6176 * If we've experienced timeouts and we're not really a
6177 * classic hard mount, then just return cached data to
6178 * the caller instead of likely hanging on an RPC.
6179 */
316670eb 6180int
fe8ab488 6181nfs_use_cache(struct nfsmount *nmp)
316670eb 6182{
fe8ab488
A
6183 /*
6184 *%%% We always let mobile users goto the cache,
6185 * perhaps we should not even require them to have
6186 * a timeout?
6187 */
6188 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
0a7de745 6189 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
316670eb 6190
fe8ab488
A
6191 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6192
6193 /*
6194 * So if we have a timeout and we're not really a hard hard-mount,
6195 * return 1 to not get things out of the cache.
6196 */
316670eb 6197
0a7de745 6198 return (nmp->nm_state & timeoutmask) && cache_ok;
316670eb
A
6199}
6200
fe8ab488
A
/*
 * Log a message that nfs or lockd server is unresponsive. Check if we
 * can be squished and if we can, or that our dead timeout has
 * expired, and we're not holding state, set our mount as dead, remove
 * our mount state and ask to be unmounted. If we are holding state
 * we're being called from the nfs_request_timer and will soon detect
 * that we need to unmount.
 */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* Remember whether we were already considered unresponsive before this call. */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* Record any newly-reported timeout types in the mount state. */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* Re-evaluate squishiness (may adjust nm_curdeadtimeout). */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* Just became unresponsive: start the dead-timeout clock. */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			/* Dead timeout expired and we're not holding state: the mount is dead. */
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/*
	 * NOTE(review): nm_state is read here after nm_lock has been dropped;
	 * presumably a racing NFSSTA_DEAD transition is benign -- confirm.
	 */
	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* Either hidden from users, or no 0->1 unresponsive transition: no event. */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6273
/*
 * The server is reachable again: log the message (if any), clear the
 * reported timeout flags, reset the dead-timeout bookkeeping, and post
 * a VQ_NOTRESP "resolved" event if the mount just transitioned from
 * unresponsive back to responsive.
 */
void
nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	int do_vfs_signal;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	if (msg) {
		nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* Remember whether we were considered unresponsive before clearing flags. */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* Clear only the timeout types the caller reports as recovered. */
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* Reset dead-timeout tracking and squishy state now that the server responded. */
	nmp->nm_deadto_start = 0;
	nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
	nmp->nm_state &= ~NFSSTA_SQUISHY;
	lck_mtx_unlock(&nmp->nm_lock);

	if (softnobrowse) {
		do_vfs_signal = 0;
	} else {
		/* Signal only on a genuine unresponsive -> responsive transition. */
		do_vfs_signal = (wasunresponsive && !unresponsive);
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
	}
}
6325
6326
2d21ac55
A
6327#endif /* NFSCLIENT */
6328
6329#if NFSSERVER
6330
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 *
 * On success the constructed chain is returned through *nmrepp.
 * Note: the "error" variable is threaded through the nfsm_chain_add_32
 * macros -- once it becomes nonzero, subsequent adds are no-ops.
 */
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	err = nd->nd_repstat;
	if (err && (nd->nd_vers == NFS_VER2)) {
		/* NFSv2 error replies carry no body beyond the status. */
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	nfsm_chain_init(&nmrep, mrep);
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* RPC-level rejection: MSG_DENIED with either auth error or version mismatch. */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* low and high supported RPC versions (both 2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			if (err != NFSERR_RETVOID) {
				/* NFS-level status word (0 on success). */
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsstats.srvrpc_errs);
	}
	return 0;
}
6446
6447/*
2d21ac55
A
6448 * The nfs server send routine.
6449 *
6450 * - return EINTR or ERESTART if interrupted by a signal
6451 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6452 * - do any cleanup required by recoverable socket errors (???)
1c79356b 6453 */
2d21ac55
A
6454int
6455nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
1c79356b 6456{
2d21ac55
A
6457 int error;
6458 socket_t so = slp->ns_so;
6459 struct sockaddr *sendnam;
6460 struct msghdr msg;
1c79356b 6461
2d21ac55
A
6462 bzero(&msg, sizeof(msg));
6463 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6464 if ((sendnam = mbuf_data(nam))) {
6465 msg.msg_name = (caddr_t)sendnam;
6466 msg.msg_namelen = sendnam->sa_len;
6467 }
1c79356b 6468 }
cb323159
A
6469 if (NFS_IS_DBG(NFS_FAC_SRV, 15)) {
6470 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6471 }
2d21ac55 6472 error = sock_sendmbuf(so, &msg, top, 0, NULL);
0a7de745
A
6473 if (!error) {
6474 return 0;
6475 }
2d21ac55
A
6476 log(LOG_INFO, "nfsd send error %d\n", error);
6477
0a7de745 6478 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
2d21ac55 6479 error = EPIPE; /* zap TCP sockets if they time out on send */
0a7de745 6480 }
2d21ac55
A
6481 /* Handle any recoverable (soft) socket errors here. (???) */
6482 if (error != EINTR && error != ERESTART && error != EIO &&
0a7de745 6483 error != EWOULDBLOCK && error != EPIPE) {
2d21ac55 6484 error = 0;
0a7de745 6485 }
2d21ac55 6486
0a7de745 6487 return error;
2d21ac55 6488}
1c79356b 6489
1c79356b
A
6490/*
6491 * Socket upcall routine for the nfsd sockets.
2d21ac55 6492 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
1c79356b 6493 * Essentially do as much as possible non-blocking, else punt and it will
91447636 6494 * be called with MBUF_WAITOK from an nfsd.
1c79356b
A
6495 */
6496void
6d2010ae 6497nfsrv_rcv(socket_t so, void *arg, int waitflag)
1c79356b 6498{
6d2010ae 6499 struct nfsrv_sock *slp = arg;
1c79356b 6500
0a7de745 6501 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
1c79356b 6502 return;
0a7de745 6503 }
91447636
A
6504
6505 lck_rw_lock_exclusive(&slp->ns_rwlock);
6506 nfsrv_rcv_locked(so, slp, waitflag);
6507 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6508}
/*
 * Receive data from an nfsd socket with ns_rwlock held exclusive.
 *
 * Stream sockets: append new data to the raw buffer (ns_raw/ns_rawend)
 * and try to parse complete RPC records out of it via nfsrv_getstream().
 * Datagram sockets: dequeue packets, prepend the sender's address as a
 * SONAME mbuf, and queue each as a complete record on ns_rec.
 *
 * Note: ns_rwlock gets dropped here when called with MBUF_DONTWAIT.
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		/* honor the "drops lock when non-blocking" contract even on early return */
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			if (error == EWOULDBLOCK) {
				/* no data right now; requeue only if we can't wait here */
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		/* Append the new mbuf chain to the raw stream buffer. */
		m = mp;
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* Walk to the tail of the new chain to update ns_rawend. */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			if (error == EPERM) {
				/* bogus record mark: drop the connection */
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		/* Drain all currently-available datagrams. */
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* Prepend the sender's address as a SONAME mbuf, if we can get one. */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* Queue the datagram as a complete record on ns_rec. */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			lck_mtx_lock(nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(nfsd_mutex);
		}
	}
}
6643
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 *
 * Parses the 4-byte RPC record mark (high bit = last fragment, low 31
 * bits = fragment length), then splits fragments out of the raw buffer
 * (ns_raw/ns_cc) and accumulates them on ns_frag until the last fragment
 * completes a record, which is then queued on ns_rec.
 *
 * Returns 0 when out of data, EPERM on a bogus record mark (caller
 * drops the connection), or EWOULDBLOCK on mbuf operation failure.
 */
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int len, mlen, error;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	/* SLP_GETSTREAM acts as a re-entrancy guard for this parser. */
	if (slp->ns_flag & SLP_GETSTREAM) {
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* Need a new record mark; wait until 4 bytes are buffered. */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* Record mark is contained in the first mbuf. */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* Record mark straddles mbufs: copy it out byte by byte. */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* Bogus fragment length: caller will drop the connection. */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* Exactly one fragment's worth: take the whole raw buffer. */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* More data than one fragment: split the chain at ns_reclen bytes. */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* Fragment ends inside this mbuf: copy the prefix. */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					/* Advance this mbuf past the consumed prefix. */
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* Fragment ends exactly at this mbuf's boundary: cut the chain. */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* Whole mbuf belongs to the fragment: keep walking. */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* Not enough buffered data for the fragment yet. */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* Record complete: move it from ns_frag to the ns_rec queue. */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6797
/*
 * Parse an RPC header.
 *
 * Dequeue the next complete record from slp->ns_rec, allocate a request
 * descriptor for it, split off the (optional) SONAME address mbuf, and
 * hand the payload to nfsrv_getreq() for RPC header parsing. On success
 * the descriptor is returned through *ndp and attached to the nfsd.
 */
int
nfsrv_dorec(
	struct nfsrv_sock *slp,
	struct nfsd *nfsd,
	struct nfsrv_descript **ndp)
{
	mbuf_t m;
	mbuf_t nam;
	struct nfsrv_descript *nd;
	int error = 0;

	*ndp = NULL;
	if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
		return ENOBUFS;
	}
	MALLOC_ZONE(nd, struct nfsrv_descript *,
	    sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
	if (!nd) {
		return ENOMEM;
	}
	/* Unlink the first record from the socket's record queue. */
	m = slp->ns_rec;
	slp->ns_rec = mbuf_nextpkt(m);
	if (slp->ns_rec) {
		mbuf_setnextpkt(m, NULL);
	} else {
		/* queue is now empty */
		slp->ns_flag &= ~SLP_DOREC;
		slp->ns_recend = NULL;
	}
	slp->ns_reccnt--;
	/* A leading SONAME mbuf carries the sender's address (datagram sockets). */
	if (mbuf_type(m) == MBUF_TYPE_SONAME) {
		nam = m;
		m = mbuf_next(m);
		if ((error = mbuf_setnext(nam, NULL))) {
			panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
		}
	} else {
		nam = NULL;
	}
	nd->nd_nam2 = nam;
	nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
	if (!error) {
		error = nfsrv_getreq(nd);
	}
	if (error) {
		/* Clean up everything we allocated/claimed for this record. */
		if (nam) {
			mbuf_freem(nam);
		}
		if (nd->nd_gss_context) {
			nfs_gss_svc_ctx_deref(nd->nd_gss_context);
		}
		FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC);
		return error;
	}
	nd->nd_mrep = NULL;
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return 0;
}
6859
/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 *
 * Malformed or unsupported requests are answered by setting nd_repstat
 * and forcing nd_procnum to NFSPROC_NOOP (returning 0 so a reply is
 * still generated); only truly undecodable requests return an error,
 * which causes the caller to drop the request.
 *
 * Note: "error" is threaded through the nfsm_chain_* macros -- once it
 * becomes nonzero, subsequent gets/advances are no-ops.
 */
int
nfsrv_getreq(struct nfsrv_descript *nd)
{
	struct nfsm_chain *nmreq;
	int len, i;
	u_int32_t nfsvers, auth_type;
	int error = 0;
	uid_t user_id;
	gid_t group_id;
	int ngroups;
	uint32_t val;

	nd->nd_cr = NULL;
	nd->nd_gss_context = NULL;
	nd->nd_gss_seqnum = 0;
	nd->nd_gss_mb = NULL;

	/* -2 == "nobody" style default ids until parsed from the wire */
	user_id = group_id = -2;
	val = auth_type = len = 0;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
	nfsm_chain_get_32(error, nmreq, val); // RPC Call
	if (!error && (val != RPC_CALL)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nd->nd_repstat = 0;
	nfsm_chain_get_32(error, nmreq, val); // RPC Version
	nfsmout_if(error);
	if (val != RPC_VER2) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
	nfsmout_if(error);
	if (val != NFS_PROG) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
	nfsmout_if(error);
	if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nd->nd_vers = nfsvers;
	nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
	nfsmout_if(error);
	if ((nd->nd_procnum >= NFS_NPROCS) ||
	    ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	if (nfsvers != NFS_VER3) {
		/* map NFSv2 procedure numbers to the v3 numbering used internally */
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	}
	nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
	nfsm_chain_get_32(error, nmreq, len); // Auth Length
	if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
		error = EBADRPC;
	}
	nfsmout_if(error);

	/* Handle authentication */
	if (auth_type == RPCAUTH_SYS) {
		struct posix_cred temp_pcred;

		if (nd->nd_procnum == NFSPROC_NULL) {
			return 0;
		}
		nd->nd_sec = RPCAUTH_SYS;
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
		nfsm_chain_get_32(error, nmreq, len); // hostname length
		if (len < 0 || len > NFS_MAXNAMLEN) {
			error = EBADRPC;
		}
		nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
		nfsmout_if(error);

		/* create a temporary credential using the bits from the wire */
		bzero(&temp_pcred, sizeof(temp_pcred));
		nfsm_chain_get_32(error, nmreq, user_id);
		nfsm_chain_get_32(error, nmreq, group_id);
		temp_pcred.cr_groups[0] = group_id;
		nfsm_chain_get_32(error, nmreq, len); // extra GID count
		if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
			error = EBADRPC;
		}
		nfsmout_if(error);
		/* Read extra GIDs; those beyond NGROUPS-1 are skipped, not stored. */
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS) {
				nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
			} else {
				nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
			}
		}
		nfsmout_if(error);
		ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (ngroups > 1) {
			nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
		}
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
		nfsm_chain_get_32(error, nmreq, len); // verifier length
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			error = EBADRPC;
		}
		if (len > 0) {
			nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
		}

		/* request creation of a real credential */
		temp_pcred.cr_uid = user_id;
		temp_pcred.cr_ngroups = ngroups;
		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			nd->nd_repstat = ENOMEM;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else if (auth_type == RPCSEC_GSS) {
		error = nfs_gss_svc_cred_get(nd, nmreq);
		if (error) {
			if (error == EINVAL) {
				goto nfsmout; // drop the request
			}
			nd->nd_repstat = error;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else {
		if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
			return 0;
		}
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	return 0;
nfsmout:
	/* Undecodable request: release any credential and the request chain. */
	if (IS_VALID_CRED(nd->nd_cr)) {
		kauth_cred_unref(&nd->nd_cr);
	}
	nfsm_chain_cleanup(nmreq);
	return error;
}
7014
7015/*
7016 * Search for a sleeping nfsd and wake it up.
2d21ac55
A
7017 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7018 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
91447636 7019 * Note: Must be called with nfsd_mutex held.
1c79356b
A
7020 */
7021void
2d21ac55 7022nfsrv_wakenfsd(struct nfsrv_sock *slp)
1c79356b 7023{
91447636 7024 struct nfsd *nd;
1c79356b 7025
0a7de745 7026 if ((slp->ns_flag & SLP_VALID) == 0) {
1c79356b 7027 return;
0a7de745 7028 }
91447636
A
7029
7030 lck_rw_lock_exclusive(&slp->ns_rwlock);
2d21ac55
A
7031 /* if there's work to do on this socket, make sure it's queued up */
7032 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7033 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7034 slp->ns_flag |= SLP_WAITQ;
1c79356b 7035 }
91447636
A
7036 lck_rw_done(&slp->ns_rwlock);
7037
2d21ac55
A
7038 /* wake up a waiting nfsd, if possible */
7039 nd = TAILQ_FIRST(&nfsd_queue);
0a7de745 7040 if (!nd) {
2d21ac55 7041 return;
0a7de745 7042 }
2d21ac55
A
7043
7044 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7045 nd->nfsd_flag &= ~NFSD_WAITING;
7046 wakeup(nd);
1c79356b 7047}
2d21ac55
A
7048
7049#endif /* NFSSERVER */