]> git.saurik.com Git - apple/xnu.git/blob - bsd/nfs/nfs_socket.c
e5c2a590ef55fac4286f6eb3ce28bb0f76d5c827
[apple/xnu.git] / bsd / nfs / nfs_socket.c
1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS
70
71 /*
72 * Socket operations for use by nfs
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/un.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
92
93 #include <sys/time.h>
94 #include <kern/clock.h>
95 #include <kern/task.h>
96 #include <kern/thread.h>
97 #include <kern/thread_call.h>
98 #include <sys/user.h>
99 #include <sys/acct.h>
100
101 #include <netinet/in.h>
102 #include <netinet/tcp.h>
103
104 #include <nfs/rpcv2.h>
105 #include <nfs/krpc.h>
106 #include <nfs/nfsproto.h>
107 #include <nfs/nfs.h>
108 #include <nfs/xdr_subs.h>
109 #include <nfs/nfsm_subs.h>
110 #include <nfs/nfs_gss.h>
111 #include <nfs/nfsmount.h>
112 #include <nfs/nfsnode.h>
113
114 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
115 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
116
117 /* XXX */
118 boolean_t current_thread_aborted(void);
119 kern_return_t thread_terminate(thread_t);
120
121
122 #if CONFIG_NFS_SERVER
123 int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
124
125 int nfsrv_getstream(struct nfsrv_sock *, int);
126 int nfsrv_getreq(struct nfsrv_descript *);
127 extern int nfsv3_procid[NFS_NPROCS];
128 #endif /* CONFIG_NFS_SERVER */
129
130 /*
131 * compare two sockaddr structures
132 */
133 int
134 nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
135 {
136 if (!sa1) {
137 return -1;
138 }
139 if (!sa2) {
140 return 1;
141 }
142 if (sa1->sa_family != sa2->sa_family) {
143 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
144 }
145 if (sa1->sa_len != sa2->sa_len) {
146 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
147 }
148 if (sa1->sa_family == AF_INET) {
149 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
150 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
151 }
152 if (sa1->sa_family == AF_INET6) {
153 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
154 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
155 }
156 return -1;
157 }
158
159 #if CONFIG_NFS_CLIENT
160
161 int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
162 int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
163 int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
164 void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
165 void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
166 int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
167 int nfs_reconnect(struct nfsmount *);
168 int nfs_connect_setup(struct nfsmount *);
169 void nfs_mount_sock_thread(void *, wait_result_t);
170 void nfs_udp_rcv(socket_t, void*, int);
171 void nfs_tcp_rcv(socket_t, void*, int);
172 void nfs_sock_poke(struct nfsmount *);
173 void nfs_request_match_reply(struct nfsmount *, mbuf_t);
174 void nfs_reqdequeue(struct nfsreq *);
175 void nfs_reqbusy(struct nfsreq *);
176 struct nfsreq *nfs_reqnext(struct nfsreq *);
177 int nfs_wait_reply(struct nfsreq *);
178 void nfs_softterm(struct nfsreq *);
179 int nfs_can_squish(struct nfsmount *);
180 int nfs_is_squishy(struct nfsmount *);
181 int nfs_is_dead(int, struct nfsmount *);
182
183 /*
184 * Estimate rto for an nfs rpc sent via. an unreliable datagram.
185 * Use the mean and mean deviation of rtt for the appropriate type of rpc
186 * for the frequent rpcs and a default for the others.
187 * The justification for doing "other" this way is that these rpcs
188 * happen so infrequently that timer est. would probably be stale.
189 * Also, since many of these rpcs are
190 * non-idempotent, a conservative timeout is desired.
191 * getattr, lookup - A+2D
192 * read, write - A+4D
193 * other - nm_timeo
194 */
195 #define NFS_RTO(n, t) \
196 ((t) == 0 ? (n)->nm_timeo : \
197 ((t) < 3 ? \
198 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
199 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
200 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
201 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
202
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 *
 * Indexed by NFS procedure number; consumed via the NFS_SRTT/NFS_SDRTT
 * macros above to select which per-mount smoothed-RTT estimate applies
 * to a given RPC.  Procedures marked 0 fall back to nm_timeo (see NFS_RTO).
 */
static const int proct[] = {
	[NFSPROC_NULL]          =       0,
	[NFSPROC_GETATTR]       =       1,
	[NFSPROC_SETATTR]       =       0,
	[NFSPROC_LOOKUP]        =       2,
	[NFSPROC_ACCESS]        =       1,
	[NFSPROC_READLINK]      =       3,
	[NFSPROC_READ]          =       3,
	[NFSPROC_WRITE]         =       4,
	[NFSPROC_CREATE]        =       0,
	[NFSPROC_MKDIR]         =       0,
	[NFSPROC_SYMLINK]       =       0,
	[NFSPROC_MKNOD]         =       0,
	[NFSPROC_REMOVE]        =       0,
	[NFSPROC_RMDIR]         =       0,
	[NFSPROC_RENAME]        =       0,
	[NFSPROC_LINK]          =       0,
	[NFSPROC_READDIR]       =       3,
	[NFSPROC_READDIRPLUS]   =       3,
	[NFSPROC_FSSTAT]        =       0,
	[NFSPROC_FSINFO]        =       0,
	[NFSPROC_PATHCONF]      =       0,
	[NFSPROC_COMMIT]        =       0,
	[NFSPROC_NOOP]          =       0,
};
236
237 /*
238 * There is a congestion window for outstanding rpcs maintained per mount
239 * point. The cwnd size is adjusted in roughly the way that:
240 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
241 * SIGCOMM '88". ACM, August 1988.
242 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
243 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
244 * of rpcs is in progress.
245 * (The sent count and cwnd are scaled for integer arith.)
246 * Variants of "slow start" were tried and were found to be too much of a
247 * performance hit (ave. rtt 3 times larger),
248 * I suspect due to the large rtt that nfs rpcs have.
249 */
#define NFS_CWNDSCALE   256                     /* fixed-point scale factor for cwnd/sent accounting */
#define NFS_MAXCWND     (NFS_CWNDSCALE * 32)    /* max congestion window: 32 outstanding requests */
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; /* exponential retransmit backoff multipliers */
253
/*
 * Increment location index to next address/server/location.
 *
 * Advances *nlip one step through the three-level hierarchy in nlp
 * (location -> server -> address), wrapping at each level and skipping
 * servers that have no addresses.  If we come all the way back around
 * to the exact index we started from (meaning no server anywhere had a
 * usable address), *nlip is left unchanged rather than spinning forever.
 */
void
nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
{
	uint8_t loc = nlip->nli_loc;
	uint8_t serv = nlip->nli_serv;
	uint8_t addr = nlip->nli_addr;

	/* move to next address */
	addr++;
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		/* no more addresses on current server, go to first address of next server */
next_server:
		addr = 0;
		serv++;
		if (serv >= nlp->nl_locations[loc]->nl_servcount) {
			/* no more servers on current location, go to first server of next location */
			serv = 0;
			loc++;
			if (loc >= nlp->nl_numlocs) {
				loc = 0; /* after last location, wrap back around to first location */
			}
		}
	}
	/*
	 * It's possible for this next server to not have any addresses.
	 * Check for that here and go to the next server.
	 * But bail out if we've managed to come back around to the original
	 * location that was passed in. (That would mean no servers had any
	 * addresses. And we don't want to spin here forever.)
	 */
	if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
		return;
	}
	if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
		goto next_server;
	}

	/* commit the new index */
	nlip->nli_loc = loc;
	nlip->nli_serv = serv;
	nlip->nli_addr = addr;
}
298
299 /*
300 * Compare two location indices.
301 */
302 int
303 nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
304 {
305 if (nlip1->nli_loc != nlip2->nli_loc) {
306 return nlip1->nli_loc - nlip2->nli_loc;
307 }
308 if (nlip1->nli_serv != nlip2->nli_serv) {
309 return nlip1->nli_serv - nlip2->nli_serv;
310 }
311 return nlip1->nli_addr - nlip2->nli_addr;
312 }
313
314 /*
315 * Get the mntfromname (or path portion only) for a given location.
316 */
317 void
318 nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly)
319 {
320 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
321 char *p;
322 int cnt, i;
323
324 p = s;
325 if (!pathonly) {
326 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
327 if (name == NULL) {
328 name = "";
329 }
330 if (*name == '\0') {
331 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
332 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
333 }
334 cnt = scnprintf(p, size, "<%s>:", name);
335 } else {
336 cnt = scnprintf(p, size, "%s:", name);
337 }
338 p += cnt;
339 size -= cnt;
340 }
341 if (fsl->nl_path.np_compcount == 0) {
342 /* mounting root export on server */
343 if (size > 0) {
344 *p++ = '/';
345 *p++ = '\0';
346 }
347 return;
348 }
349 /* append each server path component */
350 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
351 cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
352 p += cnt;
353 size -= cnt;
354 }
355 }
356
/*
 * NFS client connect socket upcall.
 * (Used only during socket connect/search.)
 *
 * Invoked by the socket layer when data arrives on a socket being probed
 * by the connect search.  Drains the pending NULL-RPC "ping" reply from
 * the socket, parses it, and marks the socket NSO_VERIFIED on success or
 * NSO_DEAD (with nso_error set) on failure, then wakes the search thread.
 * For RPC_PROGMISMATCH replies it also attempts to negotiate a usable
 * program version from the min/max the server advertised.
 */
void
nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_socket *nso = arg;
	size_t rcvlen;
	mbuf_t m;
	int error = 0, recv = 1;

	/* checked before taking the lock: a connecting socket only needs the wakeup */
	if (nso->nso_flags & NSO_CONNECTING) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
		wakeup(nso->nso_wake);
		return;
	}

	lck_mtx_lock(&nso->nso_lock);
	/* ignore the upcall unless we're expecting a ping reply and no other upcall is active */
	if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
		NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
	nso->nso_flags |= NSO_UPCALL;

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* make sure we're still interested in this socket */
		if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
			break;
		}
		/* drop the lock across the (non-blocking) receive */
		lck_mtx_unlock(&nso->nso_lock);
		m = NULL;
		if (nso->nso_sotype == SOCK_STREAM) {
			error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
			NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
		} else {
			/* datagram socket: grab whatever has arrived (up to 1MB) */
			rcvlen = 1000000;
			error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
			recv = m ? 1 : 0;
		}
		lck_mtx_lock(&nso->nso_lock);
		if (m) {
			/* match response with request */
			struct nfsm_chain nmrep;
			uint32_t reply = 0, rxid = 0, verf_type, verf_len;
			uint32_t reply_status, rejected_status, accepted_status;

			NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
			nfsm_chain_dissect_init(error, &nmrep, m);
			nfsm_chain_get_32(error, &nmrep, rxid);
			nfsm_chain_get_32(error, &nmrep, reply);
			/* must be an RPC reply matching our ping's transaction id */
			if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
				error = EBADRPC;
			}
			nfsm_chain_get_32(error, &nmrep, reply_status);
			if (!error && (reply_status == RPC_MSGDENIED)) {
				nfsm_chain_get_32(error, &nmrep, rejected_status);
				if (!error) {
					error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
				}
			}
			nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
			nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
			nfsmout_if(error);
			if (verf_len) {
				/* skip over the verifier body (XDR-rounded) */
				nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
			}
			nfsm_chain_get_32(error, &nmrep, accepted_status);
			nfsmout_if(error);
			NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
			/* program mismatch with no version pinned: negotiate from the server's min/max */
			if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
				uint32_t minvers, maxvers;
				nfsm_chain_get_32(error, &nmrep, minvers);
				nfsm_chain_get_32(error, &nmrep, maxvers);
				nfsmout_if(error);
				if (nso->nso_protocol == PMAPPROG) {
					if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
						error = EPROGMISMATCH;
					} else if ((nso->nso_saddr->sa_family == AF_INET) &&
					    (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
						nso->nso_version = PMAPVERS;
					} else if (nso->nso_saddr->sa_family == AF_INET6) {
						if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
							nso->nso_version = RPCBVERS4;
						} else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
							nso->nso_version = RPCBVERS3;
						}
					}
				} else if (nso->nso_protocol == NFS_PROG) {
					int vers;

					/*
					 * N.B. Both portmapper and rpcbind V3 are happy to return
					 * addresses for other versions than the one you ask (getport or
					 * getaddr) and thus we may have fallen to this code path. So if
					 * we get a version that we support, use highest supported
					 * version. This assumes that the server supports all versions
					 * between minvers and maxvers. Note for IPv6 we will try and
					 * use rpcbind V4 which has getversaddr and we should not get
					 * here if that was successful.
					 */
					for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
						if (vers >= (int)minvers && vers <= (int)maxvers) {
							break;
						}
					}
					nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
				}
				if (!error && nso->nso_version) {
					accepted_status = RPC_SUCCESS;
				}
			}
			if (!error) {
				/* map the RPC accept status to an errno */
				switch (accepted_status) {
				case RPC_SUCCESS:
					error = 0;
					break;
				case RPC_PROGUNAVAIL:
					error = EPROGUNAVAIL;
					break;
				case RPC_PROGMISMATCH:
					error = EPROGMISMATCH;
					break;
				case RPC_PROCUNAVAIL:
					error = EPROCUNAVAIL;
					break;
				case RPC_GARBAGE:
					error = EBADRPC;
					break;
				case RPC_SYSTEM_ERR:
				default:
					error = EIO;
					break;
				}
			}
nfsmout:
			/* ping resolved one way or the other */
			nso->nso_flags &= ~NSO_PINGING;
			if (error) {
				NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
				    nso->nso_protocol, nso->nso_version, error);
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
			} else {
				nso->nso_flags |= NSO_VERIFIED;
			}
			mbuf_freem(m);
			/* wake up search thread */
			wakeup(nso->nso_wake);
			break;
		}
	}

	nso->nso_flags &= ~NSO_UPCALL;
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... */
		NFS_SOCK_DBG("connect upcall failed %d\n", error);
		nso->nso_error = error ? error : EPIPE;
		nso->nso_flags |= NSO_DEAD;
		wakeup(nso->nso_wake);
	}
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* nfs_socket_destroy() is sleeping on nso_flags waiting for us */
		wakeup(&nso->nso_flags);
	}
	lck_mtx_unlock(&nso->nso_lock);
}
525
/*
 * Create/initialize an nfs_socket structure.
 *
 * Allocates the structure and a copy of the address sa (with the given
 * port stamped in for INET/INET6), creates the underlying socket, and
 * optionally binds it to a reserved (low) port.  On success returns 0
 * and stores the new socket in *nsop; on failure returns an errno and
 * everything allocated here is released via nfs_socket_destroy().
 */
int
nfs_socket_create(
	struct nfsmount *nmp,
	struct sockaddr *sa,
	int sotype,
	in_port_t port,
	uint32_t protocol,
	uint32_t vers,
	int resvport,
	struct nfs_socket **nsop)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
#define NFS_SOCKET_DEBUGGING
#ifdef NFS_SOCKET_DEBUGGING
	/* build a printable form of sa (naddr) for the debug messages below */
	char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
	void *sinaddr;

	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
		} else {
			sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
		}
		if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
			strlcpy(naddr, "<unknown>", sizeof(naddr));
		}
		break;
	case AF_LOCAL:
		strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
		break;
	default:
		strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
		break;
	}
#else
	char naddr[1] = { 0 };
#endif

	*nsop = NULL;

	/* Create the socket. */
	MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO);
	if (nso) {
		MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO);
	}
	if (!nso || !nso->nso_saddr) {
		if (nso) {
			FREE(nso, M_TEMP);
		}
		return ENOMEM;
	}
	lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL);
	nso->nso_sotype = sotype;
	if (nso->nso_sotype == SOCK_STREAM) {
		nfs_rpc_record_state_init(&nso->nso_rrs);
	}
	microuptime(&now);
	nso->nso_timestamp = now.tv_sec;
	/* copy the address and stamp in the requested port (INET/INET6 only) */
	bcopy(sa, nso->nso_saddr, sa->sa_len);
	switch (sa->sa_family) {
	case AF_INET:
	case AF_INET6:
		if (sa->sa_family == AF_INET) {
			((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
		} else if (sa->sa_family == AF_INET6) {
			((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
		}
		break;
	case AF_LOCAL:
		break;
	}
	nso->nso_protocol = protocol;
	nso->nso_version = vers;
	nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
	nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);

	error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);

	/* Some servers require that the client port be a reserved port number. */
	if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
		struct sockaddr_storage ss;
		int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
		int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
		int portrange = IP_PORTRANGE_LOW;

		/* ask the stack for a low (reserved) local port, then bind to verify we got one */
		error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
		if (!error) { /* bind now to check for failure */
			ss.ss_len = sa->sa_len;
			ss.ss_family = sa->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
			}
		}
	}

	if (error) {
		NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
		    resvport ? "r" : "", port, protocol, vers);
		nfs_socket_destroy(nso);
	} else {
		NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
		    sotype, resvport ? "r" : "", port, protocol, vers);
		*nsop = nso;
	}
	return error;
}
650
651 /*
652 * Destroy an nfs_socket structure.
653 */
654 void
655 nfs_socket_destroy(struct nfs_socket *nso)
656 {
657 struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };
658
659 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error);
660 lck_mtx_lock(&nso->nso_lock);
661 nso->nso_flags |= NSO_DISCONNECTING;
662 if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
663 msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
664 }
665 lck_mtx_unlock(&nso->nso_lock);
666 sock_shutdown(nso->nso_so, SHUT_RDWR);
667 sock_close(nso->nso_so);
668 if (nso->nso_sotype == SOCK_STREAM) {
669 nfs_rpc_record_state_cleanup(&nso->nso_rrs);
670 }
671 lck_mtx_destroy(&nso->nso_lock, nfs_request_grp);
672 if (nso->nso_saddr) {
673 FREE(nso->nso_saddr, M_SONAME);
674 }
675 if (nso->nso_saddr2) {
676 FREE(nso->nso_saddr2, M_SONAME);
677 }
678 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
679 FREE(nso, M_TEMP);
680 }
681
682 /*
683 * Set common socket options on an nfs_socket.
684 */
685 void
686 nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
687 {
688 /*
689 * Set socket send/receive timeouts
690 * - Receive timeout shouldn't matter because most receives are performed
691 * in the socket upcall non-blocking.
692 * - Send timeout should allow us to react to a blocked socket.
693 * Soft mounts will want to abort sooner.
694 */
695 struct timeval timeo;
696 int on = 1, proto;
697
698 timeo.tv_usec = 0;
699 timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
700 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
701 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
702 if (nso->nso_sotype == SOCK_STREAM) {
703 /* Assume that SOCK_STREAM always requires a connection */
704 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
705 /* set nodelay for TCP */
706 sock_gettype(nso->nso_so, NULL, NULL, &proto);
707 if (proto == IPPROTO_TCP) {
708 sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
709 }
710 }
711 if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */
712 int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024);
713 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
714 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
715 }
716 /* set SO_NOADDRERR to detect network changes ASAP */
717 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
718 /* just playin' it safe with upcalls */
719 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
720 /* socket should be interruptible if the mount is */
721 if (!NMFLAG(nmp, INTR)) {
722 sock_nointerrupt(nso->nso_so, 1);
723 }
724 }
725
726 /*
727 * Release resources held in an nfs_socket_search.
728 */
729 void
730 nfs_socket_search_cleanup(struct nfs_socket_search *nss)
731 {
732 struct nfs_socket *nso, *nsonext;
733
734 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
735 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
736 nss->nss_sockcnt--;
737 nfs_socket_destroy(nso);
738 }
739 if (nss->nss_sock) {
740 nfs_socket_destroy(nss->nss_sock);
741 nss->nss_sock = NULL;
742 }
743 }
744
745 /*
746 * Prefer returning certain errors over others.
747 * This function returns a ranking of the given error.
748 */
749 int
750 nfs_connect_error_class(int error)
751 {
752 switch (error) {
753 case 0:
754 return 0;
755 case ETIMEDOUT:
756 case EAGAIN:
757 return 1;
758 case EPIPE:
759 case EADDRNOTAVAIL:
760 case ENETDOWN:
761 case ENETUNREACH:
762 case ENETRESET:
763 case ECONNABORTED:
764 case ECONNRESET:
765 case EISCONN:
766 case ENOTCONN:
767 case ESHUTDOWN:
768 case ECONNREFUSED:
769 case EHOSTDOWN:
770 case EHOSTUNREACH:
771 return 2;
772 case ERPCMISMATCH:
773 case EPROCUNAVAIL:
774 case EPROGMISMATCH:
775 case EPROGUNAVAIL:
776 return 3;
777 case EBADRPC:
778 return 4;
779 default:
780 return 5;
781 }
782 }
783
784 /*
785 * Make sure a socket search returns the best error.
786 */
787 void
788 nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
789 {
790 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
791 nss->nss_error = error;
792 }
793 }
794
/* nfs_connect_search_new_socket:
 *	Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified
 *	by nss.
 *
 *	nss_last is set to -1 at initialization to indicate the first time. It's set to -2 if an address was found but
 *	could not be used or if a socket timed out.
 *
 *	Each new socket is created with nfs_connect_upcall() installed and is
 *	appended to the search list (nss_socklist); the caller's search loop
 *	drives/reaps them from there.  Returns 0 or an errno (EINTR if the
 *	mount is being unmounted, or a socket-creation failure).
 */
int
nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	struct sockaddr_storage ss;
	struct nfs_socket *nso;
	char *addrstr;
	int error = 0;


	NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);

	/*
	 * while there are addresses and:
	 *	we have no sockets or
	 *	the last address failed and did not produce a socket (nss_last < 0) or
	 *	Its been a while (2 seconds) and we have less than the max number of concurrent sockets to search (4)
	 *	then attempt to create a socket with the current address.
	 */
	while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
	    ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			return EINTR;
		}
		/* Can we convert the address to a sockaddr? */
		fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
		fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
		addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
		NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
		if (*addrstr == '\0') {
			/*
			 * We have an unspecified local domain address. We use the program to translate to
			 * a well known local transport address. We only support PMAPROG and NFS for this.
			 */
			if (nss->nss_protocol == PMAPPROG) {
				addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
			} else if (nss->nss_protocol == NFS_PROG) {
				addrstr = nmp->nm_nfs_localport;
				if (!addrstr || *addrstr == '\0') {
					addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
				}
			}
			NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
		}
		if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
			NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
			/* unusable address: advance to the next one and note the failure */
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}
		/* Check that socket family is acceptable. */
		if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
			NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
			nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
			nss->nss_addrcnt -= 1;
			nss->nss_last = -2;
			continue;
		}

		/* Create the socket. */
		error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
		    nss->nss_port, nss->nss_protocol, nss->nss_version,
		    ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
		if (error) {
			return error;
		}

		nso->nso_location = nss->nss_nextloc;
		nso->nso_wake = nss;
		error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
		if (error) {
			NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
			lck_mtx_lock(&nso->nso_lock);
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			lck_mtx_unlock(&nso->nso_lock);
		}

		/*
		 * Queued even if marked NSO_DEAD above — presumably the search
		 * loop reaps dead sockets from the list (see
		 * nfs_connect_search_socket_reap); verify against caller.
		 */
		TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt++;
		nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
		nss->nss_addrcnt -= 1;

		nss->nss_last = now->tv_sec;
	}

	if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
		nss->nss_last = now->tv_sec;
	}

	return error;
}
897
/*
 * nfs_connect_search_socket_connect:	Connect an nfs socket nso for nfsmount nmp.
 *					If successful set the socket options for the socket as require from the mount.
 *
 * Assumes:	nso->nso_lock is held on entry and return.
 *
 * Returns:	1 if the socket is connected and set up;
 *		0 otherwise — either the connection is still in progress, or
 *		the socket failed (NSO_DEAD set and nso_error recorded).
 */
int
nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
{
	int error;

	if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
		/* no connection needed, just say it's already connected */
		NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
		nso->nso_flags |= NSO_CONNECTED;
		nfs_socket_options(nmp, nso);
		return 1; /* Socket is connected and setup */
	} else if (!(nso->nso_flags & NSO_CONNECTING)) {
		/* initiate the connection */
		nso->nso_flags |= NSO_CONNECTING;
		/* drop the lock across the (non-blocking) connect call */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
		    nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
		error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
		if (error) {
			NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		}
		lck_mtx_lock(&nso->nso_lock);
		/* EINPROGRESS just means the non-blocking connect is underway */
		if (error && (error != EINPROGRESS)) {
			nso->nso_error = error;
			nso->nso_flags |= NSO_DEAD;
			return 0;
		}
	}
	if (nso->nso_flags & NSO_CONNECTING) {
		/* check the connection */
		if (sock_isconnected(nso->nso_so)) {
			NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_flags &= ~NSO_CONNECTING;
			nso->nso_flags |= NSO_CONNECTED;
			nfs_socket_options(nmp, nso);
			return 1; /* Socket is connected and setup */
		} else {
			/* not connected yet; see if the socket has recorded an error */
			int optlen = sizeof(error);
			error = 0;
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
			if (error) { /* we got an error on the socket */
				NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
				if (verbose) {
					printf("nfs connect socket error %d for %s\n",
					    error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
				}
				nso->nso_error = error;
				nso->nso_flags |= NSO_DEAD;
				return 0;
			}
		}
	}

	return 0; /* Waiting to be connected */
}
964
965 /*
966 * nfs_connect_search_ping: Send a null proc on the nso socket.
967 */
int
nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
{
	/* initiate a NULL RPC request */
	uint64_t xid = nso->nso_pingxid;
	mbuf_t m, mreq = NULL;
	struct msghdr msg;
	size_t reqlen, sentlen;
	uint32_t vers = nso->nso_version;
	int error;

	/* If no version has been negotiated yet, pick a default for the protocol. */
	if (!vers) {
		if (nso->nso_protocol == PMAPPROG) {
			vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
		} else if (nso->nso_protocol == NFS_PROG) {
			vers = PVER2MAJOR(nmp->nm_max_vers);
		}
	}
	/* drop nso_lock while building the RPC header (it may allocate/block) */
	lck_mtx_unlock(&nso->nso_lock);
	NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
	error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
	    vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
	lck_mtx_lock(&nso->nso_lock);
	if (!error) {
		nso->nso_flags |= NSO_PINGING;
		nso->nso_pingxid = R_XID32(xid);
		nso->nso_reqtimestamp = now->tv_sec;
		bzero(&msg, sizeof(msg));
		/* unconnected datagram sockets need an explicit destination address */
		if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
			msg.msg_name = nso->nso_saddr;
			msg.msg_namelen = nso->nso_saddr->sa_len;
		}
		/* total up the request length so a short send can be detected below */
		for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
			reqlen += mbuf_len(m);
		}
		/* again, drop the lock while doing socket I/O */
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
		error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
		NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
		lck_mtx_lock(&nso->nso_lock);
		/* a partial send is treated as a (timeout) failure */
		if (!error && (sentlen != reqlen)) {
			error = ETIMEDOUT;
		}
	}
	if (error) {
		/* mark the socket dead; the reap pass will destroy it */
		nso->nso_error = error;
		nso->nso_flags |= NSO_DEAD;
		return 0;
	}

	return 1;
}
1021
1022 /*
1023 * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket.
1024 * Set the nfs socket protocol and version if needed.
1025 */
1026 void
1027 nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
1028 {
1029 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1030 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1031 if (!nso->nso_version) {
1032 /* If the version isn't set, the default must have worked. */
1033 if (nso->nso_protocol == PMAPPROG) {
1034 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1035 }
1036 if (nso->nso_protocol == NFS_PROG) {
1037 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
1038 }
1039 }
1040 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1041 nss->nss_sockcnt--;
1042 nss->nss_sock = nso;
1043 }
1044
1045 /*
1046 * nfs_connect_search_socket_reap: For each socket in the search list mark any timed out socket as dead and remove from
1047 * the list. Dead socket are then destroyed.
1048 */
void
nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
{
	struct nfs_socket *nso, *nsonext;

	/*
	 * Safe traversal: dead sockets are unlinked (and freed) as we go.
	 * NOTE: nmp is marked __unused because it only appears inside
	 * NFS_SOCK_DBG calls here — presumably those compile away in
	 * non-debug builds.
	 */
	TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
		lck_mtx_lock(&nso->nso_lock);
		if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
			/* took too long */
			NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
			    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
			nso->nso_error = ETIMEDOUT;
			nso->nso_flags |= NSO_DEAD;
		}
		if (!(nso->nso_flags & NSO_DEAD)) {
			lck_mtx_unlock(&nso->nso_lock);
			continue;
		}
		lck_mtx_unlock(&nso->nso_lock);
		NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
		    vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
		/* record the socket's error in the search before destroying the socket */
		nfs_socket_search_update_error(nss, nso->nso_error);
		TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
		nss->nss_sockcnt--;
		nfs_socket_destroy(nso);
		/* If there are more sockets to try, force the starting of another socket */
		if (nss->nss_addrcnt > 0) {
			nss->nss_last = -2;
		}
	}
}
1080
1081 /*
1082 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1083 */
int
nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
{
	int error;

	/* log a warning if connect is taking a while */
	if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
		printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		nss->nss_flags |= NSS_WARNED;
	}
	/* give up if the mount is being torn down */
	if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
		return EINTR;
	}
	/* check for interruption (e.g. a pending signal); abort with its error */
	if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
		return error;
	}

	/* If we were successful at sending a ping, wait up to a second for a reply */
	if (nss->nss_last >= 0) {
		tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
	}

	return 0;
}
1108
1109
1110 /*
1111 * Continue the socket search until we have something to report.
1112 */
int
nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
{
	struct nfs_socket *nso;
	struct timeval now;
	int error;
	int verbose = (nss->nss_flags & NSS_VERBOSE);

loop:
	microuptime(&now);
	NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);

	/* add a new socket to the socket list if needed and available */
	error = nfs_connect_search_new_socket(nmp, nss, &now);
	if (error) {
		NFS_SOCK_DBG("nfs connect returned %d\n", error);
		return error;
	}

	/*
	 * Check each active socket on the list and try to push it along.
	 * The connect/ping helpers return 0 when the socket is either still
	 * in progress or has been marked NSO_DEAD — skip it this pass.
	 */
	TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
		lck_mtx_lock(&nso->nso_lock);

		/* If not connected connect it */
		if (!(nso->nso_flags & NSO_CONNECTED)) {
			if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* If the socket hasn't been verified or in a ping, ping it. We also handle UDP retransmits */
		if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
		    ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
			if (!nfs_connect_search_ping(nmp, nso, &now)) {
				lck_mtx_unlock(&nso->nso_lock);
				continue;
			}
		}

		/* Has the socket been verified by the up call routine? */
		if (nso->nso_flags & NSO_VERIFIED) {
			/* WOOHOO!! This socket looks good! */
			/* sets nss->nss_sock, which ends the search below */
			nfs_connect_search_socket_found(nmp, nss, nso);
			lck_mtx_unlock(&nso->nso_lock);
			break;
		}
		lck_mtx_unlock(&nso->nso_lock);
	}

	/* Check for timed out sockets and mark as dead and then remove all dead sockets. */
	nfs_connect_search_socket_reap(nmp, nss, &now);

	/*
	 * Keep looping if we haven't found a socket yet and we have more
	 * sockets to (continue to) try.
	 */
	error = 0;
	if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
		/* wait for progress (or interruption), then rescan */
		error = nfs_connect_search_check(nmp, nss, &now);
		if (!error) {
			goto loop;
		}
	}

	NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
	return error;
}
1181
1182 /*
1183 * Initialize a new NFS connection.
1184 *
1185 * Search for a location to connect a socket to and initialize the connection.
1186 *
1187 * An NFS mount may have multiple locations/servers/addresses available.
1188 * We attempt to connect to each one asynchronously and will start
1189 * several sockets in parallel if other locations are slow to answer.
1190 * We'll use the first NFS socket we can successfully set up.
1191 *
1192 * The search may involve contacting the portmapper service first.
1193 *
1194 * A mount's initial connection may require negotiating some parameters such
1195 * as socket type and NFS version.
1196 */
1197
1198 int
1199 nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1200 {
1201 struct nfs_socket_search nss;
1202 struct nfs_socket *nso, *nsonfs;
1203 struct sockaddr_storage ss;
1204 struct sockaddr *saddr, *oldsaddr;
1205 sock_upcall upcall;
1206 #if CONFIG_NFS4
1207 struct timeval now;
1208 #endif
1209 struct timeval start;
1210 int error, savederror, nfsvers;
1211 int tryv4 = 1;
1212 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
1213 fhandle_t *fh = NULL;
1214 char *path = NULL;
1215 in_port_t port;
1216 int addrtotal = 0;
1217
1218 /* paranoia... check that we have at least one address in the locations */
1219 uint32_t loc, serv;
1220 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1221 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
1222 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
1223 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
1224 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1225 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1226 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1227 }
1228 }
1229 }
1230
1231 if (addrtotal == 0) {
1232 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1233 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1234 return EINVAL;
1235 } else {
1236 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1237 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1238 }
1239
1240 lck_mtx_lock(&nmp->nm_lock);
1241 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1242 nmp->nm_nss = &nss;
1243 lck_mtx_unlock(&nmp->nm_lock);
1244 microuptime(&start);
1245 savederror = error = 0;
1246
1247 tryagain:
1248 /* initialize socket search state */
1249 bzero(&nss, sizeof(nss));
1250 nss.nss_addrcnt = addrtotal;
1251 nss.nss_error = savederror;
1252 TAILQ_INIT(&nss.nss_socklist);
1253 nss.nss_sotype = sotype;
1254 nss.nss_startloc = nmp->nm_locations.nl_current;
1255 nss.nss_timestamp = start.tv_sec;
1256 nss.nss_timeo = timeo;
1257 if (verbose) {
1258 nss.nss_flags |= NSS_VERBOSE;
1259 }
1260
1261 /* First time connecting, we may need to negotiate some things */
1262 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1263 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1264 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1265 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
1266 if (!nmp->nm_vers) {
1267 /* No NFS version specified... */
1268 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1269 #if CONFIG_NFS4
1270 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1271 nss.nss_port = NFS_PORT;
1272 nss.nss_protocol = NFS_PROG;
1273 nss.nss_version = 4;
1274 nss.nss_flags |= NSS_FALLBACK2PMAP;
1275 } else {
1276 #endif
1277 /* ...connect to portmapper first if we (may) need any ports. */
1278 nss.nss_port = PMAPPORT;
1279 nss.nss_protocol = PMAPPROG;
1280 nss.nss_version = 0;
1281 #if CONFIG_NFS4
1282 }
1283 #endif
1284 } else {
1285 /* ...connect to NFS port first. */
1286 nss.nss_port = nmp->nm_nfsport;
1287 nss.nss_protocol = NFS_PROG;
1288 nss.nss_version = 0;
1289 }
1290 #if CONFIG_NFS4
1291 } else if (nmp->nm_vers >= NFS_VER4) {
1292 if (tryv4) {
1293 /* For NFSv4, we use the given (or default) port. */
1294 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1295 nss.nss_protocol = NFS_PROG;
1296 nss.nss_version = 4;
1297 /*
1298 * set NSS_FALLBACK2PMAP here to pick up any non standard port
1299 * if no port is specified on the mount;
1300 * Note nm_vers is set so we will only try NFS_VER4.
1301 */
1302 if (!nmp->nm_nfsport) {
1303 nss.nss_flags |= NSS_FALLBACK2PMAP;
1304 }
1305 } else {
1306 nss.nss_port = PMAPPORT;
1307 nss.nss_protocol = PMAPPROG;
1308 nss.nss_version = 0;
1309 }
1310 #endif
1311 } else {
1312 /* For NFSv3/v2... */
1313 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1314 /* ...connect to portmapper first if we need any ports. */
1315 nss.nss_port = PMAPPORT;
1316 nss.nss_protocol = PMAPPROG;
1317 nss.nss_version = 0;
1318 } else {
1319 /* ...connect to NFS port first. */
1320 nss.nss_port = nmp->nm_nfsport;
1321 nss.nss_protocol = NFS_PROG;
1322 nss.nss_version = nmp->nm_vers;
1323 }
1324 }
1325 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1326 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1327 nss.nss_protocol, nss.nss_version);
1328 } else {
1329 /* we've connected before, just connect to NFS port */
1330 if (!nmp->nm_nfsport) {
1331 /* need to ask portmapper which port that would be */
1332 nss.nss_port = PMAPPORT;
1333 nss.nss_protocol = PMAPPROG;
1334 nss.nss_version = 0;
1335 } else {
1336 nss.nss_port = nmp->nm_nfsport;
1337 nss.nss_protocol = NFS_PROG;
1338 nss.nss_version = nmp->nm_vers;
1339 }
1340 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1341 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1342 nss.nss_protocol, nss.nss_version);
1343 }
1344
1345 /* Set next location to first valid location. */
1346 /* If start location is invalid, find next location. */
1347 nss.nss_nextloc = nss.nss_startloc;
1348 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1349 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1350 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1351 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
1352 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1353 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1354 return ENOENT;
1355 }
1356 }
1357 nss.nss_last = -1;
1358
1359 keepsearching:
1360
1361 error = nfs_connect_search_loop(nmp, &nss);
1362 if (error || !nss.nss_sock) {
1363 /* search failed */
1364 nfs_socket_search_cleanup(&nss);
1365 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1366 tryv4 = 0;
1367 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1368 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1369 goto tryagain;
1370 }
1371
1372 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1373 /* Try using UDP */
1374 sotype = SOCK_DGRAM;
1375 savederror = nss.nss_error;
1376 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1377 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1378 goto tryagain;
1379 }
1380 if (!error) {
1381 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
1382 }
1383 lck_mtx_lock(&nmp->nm_lock);
1384 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1385 nmp->nm_nss = NULL;
1386 lck_mtx_unlock(&nmp->nm_lock);
1387 if (nss.nss_flags & NSS_WARNED) {
1388 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
1389 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1390 }
1391 if (fh) {
1392 FREE(fh, M_TEMP);
1393 }
1394 if (path) {
1395 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
1396 }
1397 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1398 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1399 return error;
1400 }
1401
1402 /* try to use nss_sock */
1403 nso = nss.nss_sock;
1404 nss.nss_sock = NULL;
1405
1406 /* We may be speaking to portmap first... to determine port(s). */
1407 if (nso->nso_saddr->sa_family == AF_INET) {
1408 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1409 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1410 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1411 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1412 if (nso->nso_protocol == PMAPPROG) {
1413 port = PMAPPORT;
1414 }
1415 }
1416
1417 if (port == PMAPPORT) {
1418 /* Use this portmapper port to get the port #s we need. */
1419 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1420 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1421
1422 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1423 sock_setupcall(nso->nso_so, NULL, NULL);
1424
1425 /* Set up socket address and port for NFS socket. */
1426 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1427
1428 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1429 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
1430 if (!(port = nmp->nm_nfsport)) {
1431 if (ss.ss_family == AF_INET) {
1432 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
1433 } else if (ss.ss_family == AF_INET6) {
1434 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
1435 } else if (ss.ss_family == AF_LOCAL) {
1436 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1437 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1438 }
1439 }
1440 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
1441 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
1442 continue; /* Wrong version */
1443 }
1444 #if CONFIG_NFS4
1445 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
1446 continue; /* NFSv4 does not do UDP */
1447 }
1448 #endif
1449 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1450 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1451 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1452 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1453 error = 0;
1454 } else {
1455 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1456 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1457 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1458 }
1459 if (!error) {
1460 if (ss.ss_family == AF_INET) {
1461 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1462 } else if (ss.ss_family == AF_INET6) {
1463 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1464 } else if (ss.ss_family == AF_LOCAL) {
1465 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
1466 }
1467 if (!port) {
1468 error = EPROGUNAVAIL;
1469 }
1470 #if CONFIG_NFS4
1471 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
1472 continue; /* We already tried this */
1473 }
1474 #endif
1475 }
1476 if (!error) {
1477 break;
1478 }
1479 }
1480 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
1481 error = EPROGUNAVAIL;
1482 }
1483 if (error) {
1484 nfs_socket_search_update_error(&nss, error);
1485 nfs_socket_destroy(nso);
1486 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
1487 goto keepsearching;
1488 }
1489 } else if (nmp->nm_nfs_localport) {
1490 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1491 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
1492 }
1493
1494 /* Create NFS protocol socket and add it to the list of sockets. */
1495 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */
1496 if (ss.ss_family == AF_LOCAL) {
1497 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1498 }
1499 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
1500 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
1501 if (error) {
1502 nfs_socket_search_update_error(&nss, error);
1503 nfs_socket_destroy(nso);
1504 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
1505 goto keepsearching;
1506 }
1507 nsonfs->nso_location = nso->nso_location;
1508 nsonfs->nso_wake = &nss;
1509 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1510 if (error) {
1511 nfs_socket_search_update_error(&nss, error);
1512 nfs_socket_destroy(nsonfs);
1513 nfs_socket_destroy(nso);
1514 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
1515 goto keepsearching;
1516 }
1517 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1518 nss.nss_sockcnt++;
1519 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1520 /* Set up socket address and port for MOUNT socket. */
1521 error = 0;
1522 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1523 port = nmp->nm_mountport;
1524 NFS_SOCK_DBG("mount port = %d\n", port);
1525 if (ss.ss_family == AF_INET) {
1526 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1527 } else if (ss.ss_family == AF_INET6) {
1528 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1529 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1530 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1531 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1532 }
1533 if (!port) {
1534 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1535 /* If NFS version is unknown, optimistically choose for NFSv3. */
1536 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1537 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
1538 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
1539 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1540 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
1541 }
1542 if (!error) {
1543 if (ss.ss_family == AF_INET) {
1544 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1545 } else if (ss.ss_family == AF_INET6) {
1546 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1547 } else if (ss.ss_family == AF_LOCAL) {
1548 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
1549 }
1550 if (!port) {
1551 error = EPROGUNAVAIL;
1552 }
1553 }
1554 /* create sockaddr for MOUNT */
1555 if (!error) {
1556 MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO);
1557 }
1558 if (!error && !nsonfs->nso_saddr2) {
1559 error = ENOMEM;
1560 }
1561 if (!error) {
1562 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
1563 }
1564 if (error) {
1565 NFS_SOCK_DBG("Could not create mount sockaet address %d", error);
1566 lck_mtx_lock(&nsonfs->nso_lock);
1567 nsonfs->nso_error = error;
1568 nsonfs->nso_flags |= NSO_DEAD;
1569 lck_mtx_unlock(&nsonfs->nso_lock);
1570 }
1571 }
1572 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
1573 nfs_socket_destroy(nso);
1574 goto keepsearching;
1575 }
1576
1577 /* nso is an NFS socket */
1578 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1579
1580 /* If NFS version wasn't specified, it was determined during the connect. */
1581 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1582
1583 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1584 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1585 error = 0;
1586 saddr = nso->nso_saddr2;
1587 if (!saddr) {
1588 /* Need sockaddr for MOUNT port */
1589 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
1590 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1591 port = nmp->nm_mountport;
1592 if (ss.ss_family == AF_INET) {
1593 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1594 } else if (ss.ss_family == AF_INET6) {
1595 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1596 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1597 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1598 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1599 }
1600 if (!port) {
1601 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1602 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1603 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
1604 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1605 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
1606 if (ss.ss_family == AF_INET) {
1607 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1608 } else if (ss.ss_family == AF_INET6) {
1609 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1610 }
1611 }
1612 if (!error) {
1613 if (port) {
1614 saddr = (struct sockaddr*)&ss;
1615 } else {
1616 error = EPROGUNAVAIL;
1617 }
1618 }
1619 }
1620 if (saddr) {
1621 MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO);
1622 }
1623 if (saddr && fh) {
1624 MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
1625 }
1626 if (!saddr || !fh || !path) {
1627 if (!error) {
1628 error = ENOMEM;
1629 }
1630 if (fh) {
1631 FREE(fh, M_TEMP);
1632 }
1633 if (path) {
1634 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
1635 }
1636 fh = NULL;
1637 path = NULL;
1638 nfs_socket_search_update_error(&nss, error);
1639 nfs_socket_destroy(nso);
1640 goto keepsearching;
1641 }
1642 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1643 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
1644 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
1645 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1646 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1647 if (!error) {
1648 /* Make sure we can agree on a security flavor. */
1649 int o, s; /* indices into mount option and server security flavor lists */
1650 int found = 0;
1651
1652 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1653 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1654 nmp->nm_servsec.count = 1;
1655 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1656 }
1657 if (nmp->nm_sec.count) {
1658 /* Choose the first flavor in our list that the server supports. */
1659 if (!nmp->nm_servsec.count) {
1660 /* we don't know what the server supports, just use our first choice */
1661 nmp->nm_auth = nmp->nm_sec.flavors[0];
1662 found = 1;
1663 }
1664 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1665 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
1666 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1667 nmp->nm_auth = nmp->nm_sec.flavors[o];
1668 found = 1;
1669 }
1670 }
1671 }
1672 } else {
1673 /* Choose the first one we support from the server's list. */
1674 if (!nmp->nm_servsec.count) {
1675 nmp->nm_auth = RPCAUTH_SYS;
1676 found = 1;
1677 }
1678 for (s = 0; s < nmp->nm_servsec.count; s++) {
1679 switch (nmp->nm_servsec.flavors[s]) {
1680 case RPCAUTH_SYS:
1681 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1682 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
1683 found = 0;
1684 }
1685 case RPCAUTH_NONE:
1686 case RPCAUTH_KRB5:
1687 case RPCAUTH_KRB5I:
1688 case RPCAUTH_KRB5P:
1689 if (!found) {
1690 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1691 found = 1;
1692 }
1693 break;
1694 }
1695 }
1696 }
1697 error = !found ? EAUTH : 0;
1698 }
1699 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
1700 path = NULL;
1701 if (error) {
1702 nfs_socket_search_update_error(&nss, error);
1703 FREE(fh, M_TEMP);
1704 fh = NULL;
1705 nfs_socket_destroy(nso);
1706 goto keepsearching;
1707 }
1708 if (nmp->nm_fh) {
1709 FREE(nmp->nm_fh, M_TEMP);
1710 }
1711 nmp->nm_fh = fh;
1712 fh = NULL;
1713 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1714 }
1715
1716 /* put the real upcall in place */
1717 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1718 error = sock_setupcall(nso->nso_so, upcall, nmp);
1719 if (error) {
1720 nfs_socket_search_update_error(&nss, error);
1721 nfs_socket_destroy(nso);
1722 goto keepsearching;
1723 }
1724
1725 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1726 /* set mntfromname to this location */
1727 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
1728 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
1729 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1730 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1731 }
1732 /* some negotiated values need to remain unchanged for the life of the mount */
1733 if (!nmp->nm_sotype) {
1734 nmp->nm_sotype = nso->nso_sotype;
1735 }
1736 if (!nmp->nm_vers) {
1737 nmp->nm_vers = nfsvers;
1738 #if CONFIG_NFS4
1739 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1740 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1741 if (nso->nso_saddr->sa_family == AF_INET) {
1742 port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
1743 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1744 port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
1745 } else {
1746 port = 0;
1747 }
1748 if (port == NFS_PORT) {
1749 nmp->nm_nfsport = NFS_PORT;
1750 }
1751 }
1752 #endif
1753 }
1754 #if CONFIG_NFS4
1755 /* do some version-specific pre-mount set up */
1756 if (nmp->nm_vers >= NFS_VER4) {
1757 microtime(&now);
1758 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
1759 if (!NMFLAG(nmp, NOCALLBACK)) {
1760 nfs4_mount_callback_setup(nmp);
1761 }
1762 }
1763 #endif
1764 }
1765
1766 /* Initialize NFS socket state variables */
1767 lck_mtx_lock(&nmp->nm_lock);
1768 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
1769 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1770 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
1771 nmp->nm_sdrtt[3] = 0;
1772 if (nso->nso_sotype == SOCK_DGRAM) {
1773 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
1774 nmp->nm_sent = 0;
1775 } else if (nso->nso_sotype == SOCK_STREAM) {
1776 nmp->nm_timeouts = 0;
1777 }
1778 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1779 nmp->nm_sockflags |= NMSOCK_SETUP;
1780 /* move the socket to the mount structure */
1781 nmp->nm_nso = nso;
1782 oldsaddr = nmp->nm_saddr;
1783 nmp->nm_saddr = nso->nso_saddr;
1784 lck_mtx_unlock(&nmp->nm_lock);
1785 error = nfs_connect_setup(nmp);
1786 lck_mtx_lock(&nmp->nm_lock);
1787 nmp->nm_sockflags &= ~NMSOCK_SETUP;
1788 if (!error) {
1789 nmp->nm_sockflags |= NMSOCK_READY;
1790 wakeup(&nmp->nm_sockflags);
1791 }
1792 if (error) {
1793 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1794 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1795 nfs_socket_search_update_error(&nss, error);
1796 nmp->nm_saddr = oldsaddr;
1797 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1798 /* undo settings made prior to setup */
1799 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
1800 nmp->nm_sotype = 0;
1801 }
1802 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_VERSION)) {
1803 #if CONFIG_NFS4
1804 if (nmp->nm_vers >= NFS_VER4) {
1805 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1806 nmp->nm_nfsport = 0;
1807 }
1808 if (nmp->nm_cbid) {
1809 nfs4_mount_callback_shutdown(nmp);
1810 }
1811 if (IS_VALID_CRED(nmp->nm_mcred)) {
1812 kauth_cred_unref(&nmp->nm_mcred);
1813 }
1814 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1815 }
1816 #endif
1817 nmp->nm_vers = 0;
1818 }
1819 }
1820 lck_mtx_unlock(&nmp->nm_lock);
1821 nmp->nm_nso = NULL;
1822 nfs_socket_destroy(nso);
1823 goto keepsearching;
1824 }
1825
1826 /* update current location */
1827 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1828 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1829 /* server has changed, we should initiate failover/recovery */
1830 // XXX
1831 }
1832 nmp->nm_locations.nl_current = nso->nso_location;
1833 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1834
1835 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1836 /* We have now successfully connected... make a note of it. */
1837 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1838 }
1839
1840 lck_mtx_unlock(&nmp->nm_lock);
1841 if (oldsaddr) {
1842 FREE(oldsaddr, M_SONAME);
1843 }
1844
1845 if (nss.nss_flags & NSS_WARNED) {
1846 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
1847 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1848 }
1849
1850 nmp->nm_nss = NULL;
1851 nfs_socket_search_cleanup(&nss);
1852 if (fh) {
1853 FREE(fh, M_TEMP);
1854 }
1855 if (path) {
1856 FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
1857 }
1858 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1859 return 0;
1860 }
1861
1862
1863 /* setup & confirm socket connection is functional */
1864 int
1865 nfs_connect_setup(
1866 #if !CONFIG_NFS4
1867 __unused
1868 #endif
1869 struct nfsmount *nmp)
1870 {
1871 int error = 0;
1872 #if CONFIG_NFS4
1873 if (nmp->nm_vers >= NFS_VER4) {
1874 if (nmp->nm_state & NFSSTA_CLIENTID) {
1875 /* first, try to renew our current state */
1876 error = nfs4_renew(nmp, R_SETUP);
1877 if ((error == NFSERR_ADMIN_REVOKED) ||
1878 (error == NFSERR_CB_PATH_DOWN) ||
1879 (error == NFSERR_EXPIRED) ||
1880 (error == NFSERR_LEASE_MOVED) ||
1881 (error == NFSERR_STALE_CLIENTID)) {
1882 lck_mtx_lock(&nmp->nm_lock);
1883 nfs_need_recover(nmp, error);
1884 lck_mtx_unlock(&nmp->nm_lock);
1885 }
1886 }
1887 error = nfs4_setclientid(nmp);
1888 }
1889 #endif
1890 return error;
1891 }
1892
1893 /*
1894 * NFS socket reconnect routine:
1895 * Called when a connection is broken.
1896 * - disconnect the old socket
1897 * - nfs_connect() again
1898 * - set R_MUSTRESEND for all outstanding requests on mount point
1899 * If this fails the mount point is DEAD!
1900 */
int
nfs_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rq;
	struct timeval now;
	thread_t thd = current_thread();
	int error, wentdown = 0, verbose = 1;
	time_t lastmsg;
	int timeo;

	/*
	 * Initialize the "last message" timestamp so the first "can not connect"
	 * warning is printed after the initial tprintf delay rather than the full one.
	 */
	microuptime(&now);
	lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);

	nfs_disconnect(nmp);


	/* use a shorter connect timeout for "squishy" mounts */
	lck_mtx_lock(&nmp->nm_lock);
	timeo = nfs_is_squishy(nmp) ? 8 : 30;
	lck_mtx_unlock(&nmp->nm_lock);

	/* keep trying to connect until we succeed or hit a fatal condition */
	while ((error = nfs_connect(nmp, verbose, timeo))) {
		verbose = 0; /* only be verbose on the first attempt */
		nfs_disconnect(nmp);
		if ((error == EINTR) || (error == ERESTART)) {
			return EINTR;
		}
		if (error == EIO) {
			return EIO;
		}
		/* periodically log/notify that the server isn't responding */
		microuptime(&now);
		if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
			lastmsg = now.tv_sec;
			nfs_down(nmp, thd, error, NFSSTA_TIMEO, "can not connect", 0);
			wentdown = 1;
		}
		lck_mtx_lock(&nmp->nm_lock);
		if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
			/* we're not yet completely mounted and */
			/* we can't reconnect, so we fail */
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("Not mounted returning %d\n", error);
			return error;
		}

		/* unresponsive long enough to be declared dead? */
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
			return ENXIO;
		}

		if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
			lck_mtx_unlock(&nmp->nm_lock);
			return error;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		/* back off ~2 seconds between attempts, re-checking for signals */
		tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
		if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
			return error;
		}
	}

	/* if we logged the server down, note that it's back up */
	if (wentdown) {
		nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
	}

	/*
	 * Loop through outstanding request list and mark all requests
	 * as needing a resend. (Though nfs_need_reconnect() probably
	 * marked them all already.)
	 */
	lck_mtx_lock(nfs_request_mutex);
	TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
		if (rq->r_nmp == nmp) {
			lck_mtx_lock(&rq->r_mtx);
			/* only requests with no error and no reply that aren't already flagged */
			if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
				rq->r_flags |= R_MUSTRESEND;
				rq->r_rtt = -1;
				wakeup(rq);
				/* purely-async requests (no waiter, not in flight) go to the async resend path */
				if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
					nfs_asyncio_resend(rq);
				}
			}
			lck_mtx_unlock(&rq->r_mtx);
		}
	}
	lck_mtx_unlock(nfs_request_mutex);
	return 0;
}
1989
1990 /*
1991 * NFS disconnect. Clean up and unlink.
1992 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct nfs_socket *nso;

	lck_mtx_lock(&nmp->nm_lock);
tryagain:
	if (nmp->nm_nso) {
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
			nmp->nm_state |= NFSSTA_WANTSND;
			msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
			/* recheck from the top: state may have changed while we slept */
			goto tryagain;
		}
		if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
			msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
			goto tryagain;
		}
		nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
		nmp->nm_sockflags &= ~NMSOCK_READY;
		/* detach the socket from the mount before destroying it */
		nso = nmp->nm_nso;
		nmp->nm_nso = NULL;
		/* nm_saddr may alias the socket's address; don't let the destroy free it */
		if (nso->nso_saddr == nmp->nm_saddr) {
			nso->nso_saddr = NULL;
		}
		/* drop the mount lock while tearing down the socket */
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_socket_destroy(nso);
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
		lck_mtx_unlock(&nmp->nm_lock);
	} else {
		lck_mtx_unlock(&nmp->nm_lock);
	}
}
2027
2028 /*
2029 * mark an NFS mount as needing a reconnect/resends.
2030 */
2031 void
2032 nfs_need_reconnect(struct nfsmount *nmp)
2033 {
2034 struct nfsreq *rq;
2035
2036 lck_mtx_lock(&nmp->nm_lock);
2037 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2038 lck_mtx_unlock(&nmp->nm_lock);
2039
2040 /*
2041 * Loop through outstanding request list and
2042 * mark all requests as needing a resend.
2043 */
2044 lck_mtx_lock(nfs_request_mutex);
2045 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2046 if (rq->r_nmp == nmp) {
2047 lck_mtx_lock(&rq->r_mtx);
2048 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2049 rq->r_flags |= R_MUSTRESEND;
2050 rq->r_rtt = -1;
2051 wakeup(rq);
2052 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2053 nfs_asyncio_resend(rq);
2054 }
2055 }
2056 lck_mtx_unlock(&rq->r_mtx);
2057 }
2058 }
2059 lck_mtx_unlock(nfs_request_mutex);
2060 }
2061
2062
2063 /*
2064 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2065 */
void
nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
{
	struct nfsmount *nmp = arg;
	struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
	thread_t thd = current_thread();
	struct nfsreq *req;
	struct timeval now;
	int error, dofinish;
	nfsnode_t np;
	int do_reconnect_sleep = 0;

	/*
	 * Main service loop: keep running while there is work to do --
	 * a socket needing reconnect, queued resends, monitored nodes,
	 * a pending dead timeout, recovery, or (v4) delegation returns.
	 * nm_lock is held at the top of each iteration; it is dropped
	 * around any operation that can block or call out.
	 */
	lck_mtx_lock(&nmp->nm_lock);
	while (!(nmp->nm_sockflags & NMSOCK_READY) ||
	    !TAILQ_EMPTY(&nmp->nm_resendq) ||
	    !LIST_EMPTY(&nmp->nm_monlist) ||
	    nmp->nm_deadto_start ||
	    (nmp->nm_state & NFSSTA_RECOVER) ||
	    ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
		if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
			break;
		}
		/* do reconnect, if necessary */
		if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			if (nmp->nm_reconnect_start <= 0) {
				microuptime(&now);
				nmp->nm_reconnect_start = now.tv_sec;
			}
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
			/*
			 * XXX We don't want to call reconnect again right away if returned errors
			 * before that may not have blocked. This has caused spamming null procs
			 * from machines in the pass.
			 */
			if (do_reconnect_sleep) {
				tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
			}
			error = nfs_reconnect(nmp);
			if (error) {
				int lvl = 7;
				/* log at level 0 only every 600th EIO/EINTR failure to avoid log spam */
				if (error == EIO || error == EINTR) {
					lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
				}
				NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
				    vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
			} else {
				nmp->nm_reconnect_start = 0;
				do_reconnect_sleep = 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) &&
		    (nmp->nm_state & NFSSTA_RECOVER) &&
		    !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
		    !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
			/* perform state recovery */
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_recover(nmp);
			lck_mtx_lock(&nmp->nm_lock);
		}
#if CONFIG_NFS4
		/* handle NFSv4 delegation returns */
		while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
		    (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
		    ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
			lck_mtx_unlock(&nmp->nm_lock);
			nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
			lck_mtx_lock(&nmp->nm_lock);
		}
#endif
		/* do resends, if necessary/possible */
		while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
		    (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
		    ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
			if (req->r_resendtime) {
				microuptime(&now);
			}
			/* skip requests whose scheduled resend time hasn't arrived yet */
			while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
				req = TAILQ_NEXT(req, r_rchain);
			}
			if (!req) {
				break;
			}
			TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
			req->r_rchain.tqe_next = NFSREQNOLIST;
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_lock(&req->r_mtx);
			/* Note that we have a reference on the request that was taken nfs_asyncio_resend */
			if (req->r_error || req->r_nmrep.nmc_mhead) {
				/* request already failed or already has a reply: just finish it */
				dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				req->r_flags &= ~R_RESENDQ;
				wakeup(req);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				continue;
			}
			if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
				req->r_flags &= ~R_RESTART;
				req->r_resendtime = 0;
				lck_mtx_unlock(&req->r_mtx);
				/* async RPCs on GSS mounts need to be rebuilt and resent. */
				nfs_reqdequeue(req);
#if CONFIG_NFS_GSS
				if (nfs_request_using_gss(req)) {
					nfs_gss_clnt_rpcdone(req);
					error = nfs_gss_clnt_args_restore(req);
					if (error == ENEEDAUTH) {
						/* force a fresh XID so the request is rebuilt */
						req->r_xid = 0;
					}
				}
#endif /* CONFIG_NFS_GSS */
				NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
				    nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
				    req->r_flags, req->r_rtt);
				error = nfs_sigintr(nmp, req, req->r_thread, 0);
				if (!error) {
					error = nfs_request_add_header(req);
				}
				if (!error) {
					error = nfs_request_send(req, 0);
				}
				lck_mtx_lock(&req->r_mtx);
				if (req->r_flags & R_RESENDQ) {
					req->r_flags &= ~R_RESENDQ;
				}
				if (error) {
					req->r_error = error;
				}
				wakeup(req);
				dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
				lck_mtx_unlock(&req->r_mtx);
				if (dofinish) {
					nfs_asyncio_finish(req);
				}
				nfs_request_rele(req);
				lck_mtx_lock(&nmp->nm_lock);
				/* clear error so it doesn't leak into the next iteration */
				error = 0;
				continue;
			}
			/* plain resend: just push the already-built request out again */
			NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
			    req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
			error = nfs_sigintr(nmp, req, req->r_thread, 0);
			if (!error) {
				req->r_flags |= R_SENDING;
				lck_mtx_unlock(&req->r_mtx);
				error = nfs_send(req, 0);
				lck_mtx_lock(&req->r_mtx);
				if (!error) {
					if (req->r_flags & R_RESENDQ) {
						req->r_flags &= ~R_RESENDQ;
					}
					wakeup(req);
					lck_mtx_unlock(&req->r_mtx);
					nfs_request_rele(req);
					lck_mtx_lock(&nmp->nm_lock);
					continue;
				}
			}
			/* send failed (or interrupted): record the error and finish the request */
			req->r_error = error;
			if (req->r_flags & R_RESENDQ) {
				req->r_flags &= ~R_RESENDQ;
			}
			wakeup(req);
			dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
			lck_mtx_unlock(&req->r_mtx);
			if (dofinish) {
				nfs_asyncio_finish(req);
			}
			nfs_request_rele(req);
			lck_mtx_lock(&nmp->nm_lock);
		}
		/* unresponsive long enough? mark the mount dead and stop servicing it */
		if (nfs_mount_check_dead_timeout(nmp)) {
			nfs_mount_make_zombie(nmp);
			break;
		}

		if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
			break;
		}
		/* check monitored nodes, if necessary/possible */
		if (!LIST_EMPTY(&nmp->nm_monlist)) {
			nmp->nm_state |= NFSSTA_MONITOR_SCAN;
			LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
				if (!(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
				np->n_mflag |= NMMONSCANINPROG;
				lck_mtx_unlock(&nmp->nm_lock);
				error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
				if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
					nfs_data_update_size(np, 0);
				}
				lck_mtx_lock(&nmp->nm_lock);
				np->n_mflag &= ~NMMONSCANINPROG;
				if (np->n_mflag & NMMONSCANWANT) {
					np->n_mflag &= ~NMMONSCANWANT;
					wakeup(&np->n_mflag);
				}
				if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
				    (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
					break;
				}
			}
			nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
			if (nmp->nm_state & NFSSTA_UNMOUNTING) {
				wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
			}
		}
		if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
			/* sleep 1s when there's pending work, otherwise 5s, until woken */
			if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
			    (nmp->nm_state & NFSSTA_RECOVER)) {
				ts.tv_sec = 1;
			} else {
				ts.tv_sec = 5;
			}
			msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
		}
	}

	/* If we're unmounting, send the unmount RPC, if requested/appropriate. */
	if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
	    (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
	    (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		lck_mtx_unlock(&nmp->nm_lock);
		/* give the unmount RPC more time if the socket is still usable */
		nfs3_umount_rpc(nmp, vfs_context_kernel(),
		    (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
		lck_mtx_lock(&nmp->nm_lock);
	}

	/* clear the mount's thread pointer (if still us) and terminate */
	if (nmp->nm_sockthd == thd) {
		nmp->nm_sockthd = NULL;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	wakeup(&nmp->nm_sockthd);
	thread_terminate(thd);
}
2308
2309 /* start or wake a mount's socket thread */
2310 void
2311 nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2312 {
2313 if (nmp->nm_sockthd) {
2314 wakeup(&nmp->nm_sockthd);
2315 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2316 thread_deallocate(nmp->nm_sockthd);
2317 }
2318 }
2319
2320 /*
2321 * Check if we should mark the mount dead because the
2322 * unresponsive mount has reached the dead timeout.
2323 * (must be called with nmp locked)
2324 */
2325 int
2326 nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2327 {
2328 struct timeval now;
2329
2330 if (nmp->nm_state & NFSSTA_DEAD) {
2331 return 1;
2332 }
2333 if (nmp->nm_deadto_start == 0) {
2334 return 0;
2335 }
2336 nfs_is_squishy(nmp);
2337 if (nmp->nm_curdeadtimeout <= 0) {
2338 return 0;
2339 }
2340 microuptime(&now);
2341 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
2342 return 0;
2343 }
2344 return 1;
2345 }
2346
2347 /*
2348 * Call nfs_mount_zombie to remove most of the
2349 * nfs state for the mount, and then ask to be forcibly unmounted.
2350 *
2351 * Assumes the nfs mount structure lock nm_lock is held.
2352 */
2353
2354 void
2355 nfs_mount_make_zombie(struct nfsmount *nmp)
2356 {
2357 fsid_t fsid;
2358
2359 if (!nmp) {
2360 return;
2361 }
2362
2363 if (nmp->nm_state & NFSSTA_DEAD) {
2364 return;
2365 }
2366
2367 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
2368 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
2369 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2370 lck_mtx_unlock(&nmp->nm_lock);
2371 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2372 vfs_event_signal(&fsid, VQ_DEAD, 0);
2373 lck_mtx_lock(&nmp->nm_lock);
2374 }
2375
2376
2377 /*
2378 * NFS callback channel socket state
2379 */
struct nfs_callback_socket {
	TAILQ_ENTRY(nfs_callback_socket) ncbs_link;     /* nfs4_cb_socks list linkage */
	socket_t ncbs_so;                       /* the socket */
	struct sockaddr_storage ncbs_saddr;     /* socket address */
	struct nfs_rpc_record_state ncbs_rrs;   /* RPC record parsing state */
	time_t ncbs_stamp;                      /* last accessed at */
	uint32_t ncbs_flags;                    /* see below */
};
#define NCBSOCK_UPCALL          0x0001  /* socket upcall (nfs4_cb_rcv) in progress */
#define NCBSOCK_UPCALLWANT      0x0002  /* someone is waiting for the upcall to finish */
#define NCBSOCK_DEAD            0x0004  /* socket to be reaped by the cleanup timer */
2391
2392 #if CONFIG_NFS4
2393 /*
2394 * NFS callback channel state
2395 *
2396 * One listening socket for accepting socket connections from servers and
2397 * a list of connected sockets to handle callback requests on.
2398 * Mounts registered with the callback channel are assigned IDs and
2399 * put on a list so that the callback request handling code can match
2400 * the requests up with mounts.
2401 */
socket_t nfs4_cb_so = NULL;             /* listening IPv4 callback socket */
socket_t nfs4_cb_so6 = NULL;            /* listening IPv6 callback socket */
in_port_t nfs4_cb_port = 0;             /* local port the IPv4 socket is bound to */
in_port_t nfs4_cb_port6 = 0;            /* local port the IPv6 socket is bound to */
uint32_t nfs4_cb_id = 0;                /* next callback ID to assign to a mount (0 = uninitialized) */
uint32_t nfs4_cb_so_usecount = 0;       /* number of mounts using the callback channel */
TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;       /* accepted callback connections */
TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;                /* mounts registered for callbacks */

int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2412
2413 /*
2414 * Set up the callback channel for the NFS mount.
2415 *
2416 * Initializes the callback channel socket state and
2417 * assigns a callback ID to the mount.
2418 */
void
nfs4_mount_callback_setup(struct nfsmount *nmp)
{
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	socket_t so = NULL;
	socket_t so6 = NULL;
	struct timeval timeo;
	int error, on = 1;
	in_port_t port;

	lck_mtx_lock(nfs_global_mutex);
	/* first caller ever: initialize the global lists; ID 0 stays reserved */
	if (nfs4_cb_id == 0) {
		TAILQ_INIT(&nfs4_cb_mounts);
		TAILQ_INIT(&nfs4_cb_socks);
		nfs4_cb_id++;
	}
	nmp->nm_cbid = nfs4_cb_id++;
	if (nmp->nm_cbid == 0) {
		/* counter wrapped: 0 means "no callback ID", so skip it */
		nmp->nm_cbid = nfs4_cb_id++;
	}
	nfs4_cb_so_usecount++;
	TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);

	/* listening sockets already set up by a previous mount: done */
	if (nfs4_cb_so) {
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}

	/* IPv4 */
	error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
		goto fail;
	}
	so = nfs4_cb_so;

	sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(nfs_callback_port); /* try to use specified port */
	error = sock_bind(so, (struct sockaddr *)&sin);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
		goto fail;
	}
	/* read back the actual bound port (in case nfs_callback_port was 0) */
	error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port = ntohs(sin.sin_port);

	error = sock_listen(so, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
	}
	error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
	}
	sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	/* timeout setsockopt failures are non-fatal: clear error */
	error = 0;

	/* IPv6 */
	error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
		goto fail;
	}
	so6 = nfs4_cb_so6;

	sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	/* try to use specified port or same port as IPv4 */
	port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port;
ipv6_bind_again:
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = in6addr_any;
	sin6.sin6_port = htons(port);
	error = sock_bind(so6, (struct sockaddr *)&sin6);
	if (error) {
		if (port != nfs_callback_port) {
			/* if we simply tried to match the IPv4 port, then try any port */
			port = 0;
			goto ipv6_bind_again;
		}
		log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
		goto fail;
	}
	error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
		goto fail;
	}
	nfs4_cb_port6 = ntohs(sin6.sin6_port);

	error = sock_listen(so6, 32);
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
		goto fail;
	}

	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
	}
	error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
	}
	sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
	error = 0;

fail:
	if (error) {
		/* a failure on either family tears down both listening sockets */
		nfs4_cb_so = nfs4_cb_so6 = NULL;
		lck_mtx_unlock(nfs_global_mutex);
		if (so) {
			sock_shutdown(so, SHUT_RDWR);
			sock_close(so);
		}
		if (so6) {
			sock_shutdown(so6, SHUT_RDWR);
			sock_close(so6);
		}
	} else {
		lck_mtx_unlock(nfs_global_mutex);
	}
}
2567
2568 /*
2569 * Shut down the callback channel for the NFS mount.
2570 *
2571 * Clears the mount's callback ID and releases the mounts
2572 * reference on the callback socket. Last reference dropped
2573 * will also shut down the callback socket(s).
2574 */
void
nfs4_mount_callback_shutdown(struct nfsmount *nmp)
{
	struct nfs_callback_socket *ncbsp;
	socket_t so, so6;
	struct nfs4_cb_sock_list cb_socks;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	lck_mtx_lock(nfs_global_mutex);
	TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
	/* wait for any callbacks in progress to complete */
	while (nmp->nm_cbrefs) {
		msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts);
	}
	nmp->nm_cbid = 0;
	/* other mounts still using the callback channel: leave the sockets up */
	if (--nfs4_cb_so_usecount) {
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	/* last user: grab the listening sockets and connection list, then drop the lock */
	so = nfs4_cb_so;
	so6 = nfs4_cb_so6;
	nfs4_cb_so = nfs4_cb_so6 = NULL;
	TAILQ_INIT(&cb_socks);
	TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
	lck_mtx_unlock(nfs_global_mutex);
	if (so) {
		sock_shutdown(so, SHUT_RDWR);
		sock_close(so);
	}
	if (so6) {
		sock_shutdown(so6, SHUT_RDWR);
		sock_close(so6);
	}
	/* close and free every accepted callback connection */
	while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
		TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
	}
}
2616
2617 /*
2618 * Check periodically for stale/unused nfs callback sockets
2619 */
#define NFS4_CB_TIMER_PERIOD    30      /* seconds between cleanup scans */
#define NFS4_CB_IDLE_MAX        300     /* reap sockets idle longer than this (seconds) */
void
nfs4_callback_timer(__unused void *param0, __unused void *param1)
{
	struct nfs_callback_socket *ncbsp, *nextncbsp;
	struct timeval now;

loop:
	lck_mtx_lock(nfs_global_mutex);
	/* no callback sockets left: stop rescheduling the timer */
	if (TAILQ_EMPTY(&nfs4_cb_socks)) {
		nfs4_callback_timer_on = 0;
		lck_mtx_unlock(nfs_global_mutex);
		return;
	}
	microuptime(&now);
	TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
		/* keep sockets that are alive and not idle past the limit */
		if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
		    (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
			continue;
		}
		/* reap: must drop the lock to close, then restart the scan */
		TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
		lck_mtx_unlock(nfs_global_mutex);
		sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
		sock_close(ncbsp->ncbs_so);
		nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
		FREE(ncbsp, M_TEMP);
		goto loop;
	}
	/* reschedule the next scan */
	nfs4_callback_timer_on = 1;
	nfs_interval_timer_start(nfs4_callback_timer_call,
	    NFS4_CB_TIMER_PERIOD * 1000);
	lck_mtx_unlock(nfs_global_mutex);
}
2654
2655 /*
2656 * Accept a new callback socket.
2657 */
void
nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
{
	socket_t newso = NULL;
	struct nfs_callback_socket *ncbsp;
	struct nfsmount *nmp;
	struct timeval timeo, now;
	int error, on = 1, ip;

	/* determine which listening socket (and address family) this is for */
	if (so == nfs4_cb_so) {
		ip = 4;
	} else if (so == nfs4_cb_so6) {
		ip = 6;
	} else {
		return;
	}

	/* allocate/initialize a new nfs_callback_socket */
	MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
	if (!ncbsp) {
		log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
		return;
	}
	bzero(ncbsp, sizeof(*ncbsp));
	ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
	nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);

	/* accept a new socket */
	error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
	    ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
	    nfs4_cb_rcv, ncbsp, &newso);
	if (error) {
		log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
		FREE(ncbsp, M_TEMP);
		return;
	}

	/* set up the new socket */
	/* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
	timeo.tv_usec = 0;
	timeo.tv_sec = 60;
	error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
	}
	error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
	if (error) {
		log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
	}
	sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
	sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));

	ncbsp->ncbs_so = newso;
	/* record the accept time as the initial "last accessed" stamp */
	microuptime(&now);
	ncbsp->ncbs_stamp = now.tv_sec;

	lck_mtx_lock(nfs_global_mutex);

	/* add it to the list */
	TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);

	/* verify it's from a host we have mounted */
	TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
		/* check if socket's source address matches this mount's server address */
		if (!nmp->nm_saddr) {
			continue;
		}
		if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
			break;
		}
	}
	if (!nmp) { /* we don't want this socket, mark it dead */
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	}

	/* make sure the callback socket cleanup timer is running */
	/* (shorten the timer if we've got a socket we don't want) */
	if (!nfs4_callback_timer_on) {
		nfs4_callback_timer_on = 1;
		nfs_interval_timer_start(nfs4_callback_timer_call,
		    !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
	} else if (!nmp && (nfs4_callback_timer_on < 2)) {
		/* timer already armed: re-arm it sooner to reap the unwanted socket */
		nfs4_callback_timer_on = 2;
		thread_call_cancel(nfs4_callback_timer_call);
		nfs_interval_timer_start(nfs4_callback_timer_call, 500);
	}

	lck_mtx_unlock(nfs_global_mutex);
}
2749
2750 /*
2751 * Receive mbufs from callback sockets into RPC records and process each record.
2752 * Detect connection has been closed and shut down.
2753 */
void
nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfs_callback_socket *ncbsp = arg;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct timeval now;
	mbuf_t m;
	int error = 0, recv = 1;

	/* serialize upcalls on this callback socket */
	lck_mtx_lock(nfs_global_mutex);
	while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
		/* wait if upcall is already in progress */
		ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
		msleep(ncbsp, nfs_global_mutex, PSOCK, "cbupcall", &ts);
	}
	ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);

	/* loop while we make error-free progress */
	while (!error && recv) {
		/* NOTE(review): relies on nfs_rpc_record_read() always setting m — confirm */
		error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* handle the request */
			error = nfs4_cb_handler(ncbsp, m);
		}
	}

	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/*
		 * Socket is either being closed or should be.
		 * We can't close the socket in the context of the upcall.
		 * So we mark it as dead and leave it for the cleanup timer to reap.
		 */
		ncbsp->ncbs_stamp = 0;
		ncbsp->ncbs_flags |= NCBSOCK_DEAD;
	} else {
		/* still healthy: refresh the last-accessed stamp */
		microuptime(&now);
		ncbsp->ncbs_stamp = now.tv_sec;
	}

	/* release the upcall lock and wake any waiters */
	lck_mtx_lock(nfs_global_mutex);
	ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
	lck_mtx_unlock(nfs_global_mutex);
	wakeup(ncbsp);
}
2799
2800 /*
2801 * Handle an NFS callback channel request.
2802 */
2803 int
2804 nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2805 {
2806 socket_t so = ncbsp->ncbs_so;
2807 struct nfsm_chain nmreq, nmrep;
2808 mbuf_t mhead = NULL, mrest = NULL, m;
2809 struct msghdr msg;
2810 struct nfsmount *nmp;
2811 fhandle_t fh;
2812 nfsnode_t np;
2813 nfs_stateid stateid;
2814 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2815 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2816 uint32_t auth_type, auth_len;
2817 uint32_t numres, *pnumres;
2818 int error = 0, replen, len;
2819 size_t sentlen = 0;
2820
2821 xid = numops = op = status = procnum = taglen = cbid = 0;
2822
2823 nfsm_chain_dissect_init(error, &nmreq, mreq);
2824 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2825 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
2826 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
2827 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
2828 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
2829 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
2830 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
2831 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
2832 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
2833 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
2834 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2835
2836 /* Handle authentication */
2837 /* XXX just ignore auth for now - handling kerberos may be tricky */
2838 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2839 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
2840 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2841 if (!error && (auth_len > 0)) {
2842 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2843 }
2844 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2845 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
2846 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2847 if (!error && (auth_len > 0)) {
2848 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2849 }
2850 if (error) {
2851 status = error;
2852 error = 0;
2853 goto nfsmout;
2854 }
2855
2856 switch (procnum) {
2857 case NFSPROC4_CB_NULL:
2858 status = NFSERR_RETVOID;
2859 break;
2860 case NFSPROC4_CB_COMPOUND:
2861 /* tag, minorversion, cb ident, numops, op array */
2862 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
2863 nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC);
2864
2865 /* start building the body of the response */
2866 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
2867 nfsm_chain_init(&nmrep, mrest);
2868
2869 /* copy tag from request to response */
2870 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
2871 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2872 nfsm_chain_get_32(error, &nmreq, val);
2873 nfsm_chain_add_32(error, &nmrep, val);
2874 }
2875
2876 /* insert number of results placeholder */
2877 numres = 0;
2878 nfsm_chain_add_32(error, &nmrep, numres);
2879 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2880
2881 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
2882 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
2883 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2884 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
2885 if (error) {
2886 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
2887 status = error;
2888 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
2889 status = NFSERR_RESOURCE;
2890 } else {
2891 status = NFSERR_SERVERFAULT;
2892 }
2893 error = 0;
2894 nfsm_chain_null(&nmrep);
2895 goto nfsmout;
2896 }
2897 /* match the callback ID to a registered mount */
2898 lck_mtx_lock(nfs_global_mutex);
2899 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2900 if (nmp->nm_cbid != cbid) {
2901 continue;
2902 }
2903 /* verify socket's source address matches this mount's server address */
2904 if (!nmp->nm_saddr) {
2905 continue;
2906 }
2907 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2908 break;
2909 }
2910 }
2911 /* mark the NFS mount as busy */
2912 if (nmp) {
2913 nmp->nm_cbrefs++;
2914 }
2915 lck_mtx_unlock(nfs_global_mutex);
2916 if (!nmp) {
2917 /* if no mount match, just drop socket. */
2918 error = EPERM;
2919 nfsm_chain_null(&nmrep);
2920 goto out;
2921 }
2922
2923 /* process ops, adding results to mrest */
2924 while (numops > 0) {
2925 numops--;
2926 nfsm_chain_get_32(error, &nmreq, op);
2927 if (error) {
2928 break;
2929 }
2930 switch (op) {
2931 case NFS_OP_CB_GETATTR:
2932 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2933 np = NULL;
2934 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
2935 bmlen = NFS_ATTR_BITMAP_LEN;
2936 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2937 if (error) {
2938 status = error;
2939 error = 0;
2940 numops = 0; /* don't process any more ops */
2941 } else {
2942 /* find the node for the file handle */
2943 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
2944 if (error || !np) {
2945 status = NFSERR_BADHANDLE;
2946 error = 0;
2947 np = NULL;
2948 numops = 0; /* don't process any more ops */
2949 }
2950 }
2951 nfsm_chain_add_32(error, &nmrep, op);
2952 nfsm_chain_add_32(error, &nmrep, status);
2953 if (!error && (status == EBADRPC)) {
2954 error = status;
2955 }
2956 if (np) {
2957 /* only allow returning size, change, and mtime attrs */
2958 NFS_CLEAR_ATTRIBUTES(&rbitmap);
2959 attrbytes = 0;
2960 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2961 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
2962 attrbytes += 2 * NFSX_UNSIGNED;
2963 }
2964 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2965 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
2966 attrbytes += 2 * NFSX_UNSIGNED;
2967 }
2968 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2969 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
2970 attrbytes += 3 * NFSX_UNSIGNED;
2971 }
2972 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
2973 nfsm_chain_add_32(error, &nmrep, attrbytes);
2974 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
2975 nfsm_chain_add_64(error, &nmrep,
2976 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
2977 }
2978 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
2979 nfsm_chain_add_64(error, &nmrep, np->n_size);
2980 }
2981 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
2982 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
2983 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
2984 }
2985 nfs_node_unlock(np);
2986 vnode_put(NFSTOV(np));
2987 np = NULL;
2988 }
2989 /*
2990 * If we hit an error building the reply, we can't easily back up.
2991 * So we'll just update the status and hope the server ignores the
2992 * extra garbage.
2993 */
2994 break;
2995 case NFS_OP_CB_RECALL:
2996 // (STATEID, TRUNCATE, FH) -> (STATUS)
2997 np = NULL;
2998 nfsm_chain_get_stateid(error, &nmreq, &stateid);
2999 nfsm_chain_get_32(error, &nmreq, truncate);
3000 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh);
3001 if (error) {
3002 status = error;
3003 error = 0;
3004 numops = 0; /* don't process any more ops */
3005 } else {
3006 /* find the node for the file handle */
3007 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
3008 if (error || !np) {
3009 status = NFSERR_BADHANDLE;
3010 error = 0;
3011 np = NULL;
3012 numops = 0; /* don't process any more ops */
3013 } else if (!(np->n_openflags & N_DELEG_MASK) ||
3014 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
3015 /* delegation stateid state doesn't match */
3016 status = NFSERR_BAD_STATEID;
3017 numops = 0; /* don't process any more ops */
3018 }
3019 if (!status) { /* add node to recall queue, and wake socket thread */
3020 nfs4_delegation_return_enqueue(np);
3021 }
3022 if (np) {
3023 nfs_node_unlock(np);
3024 vnode_put(NFSTOV(np));
3025 }
3026 }
3027 nfsm_chain_add_32(error, &nmrep, op);
3028 nfsm_chain_add_32(error, &nmrep, status);
3029 if (!error && (status == EBADRPC)) {
3030 error = status;
3031 }
3032 break;
3033 case NFS_OP_CB_ILLEGAL:
3034 default:
3035 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3036 status = NFSERR_OP_ILLEGAL;
3037 nfsm_chain_add_32(error, &nmrep, status);
3038 numops = 0; /* don't process any more ops */
3039 break;
3040 }
3041 numres++;
3042 }
3043
3044 if (!status && error) {
3045 if (error == EBADRPC) {
3046 status = error;
3047 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
3048 status = NFSERR_RESOURCE;
3049 } else {
3050 status = NFSERR_SERVERFAULT;
3051 }
3052 error = 0;
3053 }
3054
3055 /* Now, set the numres field */
3056 *pnumres = txdr_unsigned(numres);
3057 nfsm_chain_build_done(error, &nmrep);
3058 nfsm_chain_null(&nmrep);
3059
3060 /* drop the callback reference on the mount */
3061 lck_mtx_lock(nfs_global_mutex);
3062 nmp->nm_cbrefs--;
3063 if (!nmp->nm_cbid) {
3064 wakeup(&nmp->nm_cbrefs);
3065 }
3066 lck_mtx_unlock(nfs_global_mutex);
3067 break;
3068 }
3069
3070 nfsmout:
3071 if (status == EBADRPC) {
3072 OSAddAtomic64(1, &nfsstats.rpcinvalid);
3073 }
3074
3075 /* build reply header */
3076 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3077 nfsm_chain_init(&nmrep, mhead);
3078 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3079 nfsm_chain_add_32(error, &nmrep, xid);
3080 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3081 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3082 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3083 if (status & NFSERR_AUTHERR) {
3084 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3085 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3086 } else {
3087 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3088 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3089 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3090 }
3091 } else {
3092 /* reply status */
3093 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3094 /* XXX RPCAUTH_NULL verifier */
3095 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3096 nfsm_chain_add_32(error, &nmrep, 0);
3097 /* accepted status */
3098 switch (status) {
3099 case EPROGUNAVAIL:
3100 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3101 break;
3102 case EPROGMISMATCH:
3103 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3104 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3105 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3106 break;
3107 case EPROCUNAVAIL:
3108 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3109 break;
3110 case EBADRPC:
3111 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3112 break;
3113 default:
3114 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
3115 if (status != NFSERR_RETVOID) {
3116 nfsm_chain_add_32(error, &nmrep, status);
3117 }
3118 break;
3119 }
3120 }
3121 nfsm_chain_build_done(error, &nmrep);
3122 if (error) {
3123 nfsm_chain_null(&nmrep);
3124 goto out;
3125 }
3126 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3127 if (error) {
3128 printf("nfs cb: mbuf_setnext failed %d\n", error);
3129 goto out;
3130 }
3131 mrest = NULL;
3132 /* Calculate the size of the reply */
3133 replen = 0;
3134 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
3135 replen += mbuf_len(m);
3136 }
3137 mbuf_pkthdr_setlen(mhead, replen);
3138 error = mbuf_pkthdr_setrcvif(mhead, NULL);
3139 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3140 nfsm_chain_null(&nmrep);
3141
3142 /* send the reply */
3143 bzero(&msg, sizeof(msg));
3144 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3145 mhead = NULL;
3146 if (!error && ((int)sentlen != replen)) {
3147 error = EWOULDBLOCK;
3148 }
3149 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
3150 error = ETIMEDOUT;
3151 }
3152 out:
3153 if (error) {
3154 nfsm_chain_cleanup(&nmrep);
3155 }
3156 if (mhead) {
3157 mbuf_freem(mhead);
3158 }
3159 if (mrest) {
3160 mbuf_freem(mrest);
3161 }
3162 if (mreq) {
3163 mbuf_freem(mreq);
3164 }
3165 return error;
3166 }
3167 #endif /* CONFIG_NFS4 */
3168
3169 /*
3170 * Initialize an nfs_rpc_record_state structure.
3171 */
3172 void
3173 nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3174 {
3175 bzero(nrrsp, sizeof(*nrrsp));
3176 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3177 }
3178
3179 /*
3180 * Clean up an nfs_rpc_record_state structure.
3181 */
3182 void
3183 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3184 {
3185 if (nrrsp->nrrs_m) {
3186 mbuf_freem(nrrsp->nrrs_m);
3187 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3188 }
3189 }
3190
/*
 * Read the next (marked) RPC record from the socket.
 *
 * Incremental/resumable parser for RFC 1831-style record marking over TCP:
 * nrrsp carries the parsing state across calls, so partial reads (common
 * with MSG_DONTWAIT from an upcall) simply pick up where they left off.
 * A record may span multiple fragments; fragments accumulate on the
 * nrrs_m/nrrs_mlast chain until the last-fragment bit is seen.
 *
 * *recvp returns if any data was received.
 * *mp returns the next complete RPC record
 */
int
nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
{
	struct iovec aio;
	struct msghdr msg;
	size_t rcvlen;
	int error = 0;
	mbuf_t m;

	*recvp = 0;
	*mp = NULL;

	/* read the TCP RPC record marker */
	while (!error && nrrsp->nrrs_markerleft) {
		/* aim at the not-yet-filled tail of the 4-byte marker */
		aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
		    sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
		aio.iov_len = nrrsp->nrrs_markerleft;
		bzero(&msg, sizeof(msg));
		msg.msg_iov = &aio;
		msg.msg_iovlen = 1;
		error = sock_receive(so, &msg, flags, &rcvlen);
		if (error || !rcvlen) {
			break;
		}
		*recvp = 1;
		nrrsp->nrrs_markerleft -= rcvlen;
		if (nrrsp->nrrs_markerleft) {
			/* marker still incomplete; try to read more */
			continue;
		}
		/* record marker complete */
		nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_fragleft & 0x80000000) {
			/* high bit set: this is the record's final fragment */
			nrrsp->nrrs_lastfrag = 1;
			nrrsp->nrrs_fragleft &= ~0x80000000;
		}
		nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
		if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
			/* This is SERIOUS! We are out of sync with the sender. */
			log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
			error = EFBIG;
		}
	}

	/* read the TCP RPC record fragment */
	while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
		m = NULL;
		rcvlen = nrrsp->nrrs_fragleft;
		error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
		if (error || !rcvlen || !m) {
			break;
		}
		*recvp = 1;
		/* append mbufs to list */
		nrrsp->nrrs_fragleft -= rcvlen;
		if (!nrrsp->nrrs_m) {
			nrrsp->nrrs_m = m;
		} else {
			error = mbuf_setnext(nrrsp->nrrs_mlast, m);
			if (error) {
				printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
				mbuf_freem(m);
				break;
			}
		}
		/* advance to the tail of the newly appended chain */
		while (mbuf_next(m)) {
			m = mbuf_next(m);
		}
		nrrsp->nrrs_mlast = m;
	}

	/* done reading fragment? */
	if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
		/* reset socket fragment parsing state */
		nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
		if (nrrsp->nrrs_lastfrag) {
			/* RPC record complete */
			*mp = nrrsp->nrrs_m;
			/* reset socket record parsing state */
			nrrsp->nrrs_reclen = 0;
			nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
			nrrsp->nrrs_lastfrag = 0;
		}
	}

	return error;
}
3283
3284
3285
/*
 * The NFS client send routine.
 *
 * Send the given NFS request out the mount's socket.
 * Holds nfs_sndlock() for the duration of this call.
 *
 * - check for request termination (sigintr)
 * - wait for reconnect, if necessary
 * - UDP: check the congestion window
 * - make a copy of the request to send
 * - UDP: update the congestion window
 * - send the request
 *
 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
 * rexmit count is also updated if this isn't the first send.
 *
 * If the send is not successful, make sure R_MUSTRESEND is set.
 * If this wasn't the first transmit, set R_RESENDERR.
 * Also, undo any UDP congestion window changes made.
 *
 * If the error appears to indicate that the socket should
 * be reconnected, mark the socket for reconnection.
 *
 * Only return errors when the request should be aborted.
 *
 * Locking: takes nfs_sndlock first, then nmp->nm_lock and/or req->r_mtx
 * as needed (never both of the latter at once on the paths below).
 * "wait" selects blocking vs. non-blocking behavior for reconnect waits,
 * congestion-window waits, and the mbuf copy.
 */
int
nfs_send(struct nfsreq *req, int wait)
{
	struct nfsmount *nmp;
	struct nfs_socket *nso;
	int error, error2, sotype, rexmit, slpflag = 0, needrecon;
	struct msghdr msg;
	struct sockaddr *sendnam;
	mbuf_t mreqcopy;
	size_t sentlen = 0;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

again:
	error = nfs_sndlock(req);
	if (error) {
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* bail early if the request has been interrupted/terminated */
	error = nfs_sigintr(req->r_nmp, req, NULL, 0);
	if (error) {
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}
	nmp = req->r_nmp;
	sotype = nmp->nm_sotype;

	/*
	 * If it's a setup RPC but we're not in SETUP... must need reconnect.
	 * If it's a recovery RPC but the socket's not ready... must need reconnect.
	 */
	if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
	    ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
		error = ETIMEDOUT;
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_error = error;
		req->r_flags &= ~R_SENDING;
		lck_mtx_unlock(&req->r_mtx);
		return error;
	}

	/* If the socket needs reconnection, do that now. */
	/* wait until socket is ready - unless this request is part of setup */
	lck_mtx_lock(&nmp->nm_lock);
	if (!(nmp->nm_sockflags & NMSOCK_READY) &&
	    !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
		if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
			slpflag |= PCATCH;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_sndunlock(req);
		if (!wait) {
			/* non-blocking caller: mark for resend and return */
			lck_mtx_lock(&req->r_mtx);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			req->r_rtt = 0;
			lck_mtx_unlock(&req->r_mtx);
			return 0;
		}
		NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		lck_mtx_lock(&nmp->nm_lock);
		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
			/* don't bother waiting if the socket thread won't be reconnecting it */
			if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
				error = EIO;
				break;
			}
			if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
				struct timeval now;
				microuptime(&now);
				if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
					/* soft mount in reconnect for a while... terminate ASAP */
					OSAddAtomic64(1, &nfsstats.rpctimeouts);
					req->r_flags |= R_SOFTTERM;
					req->r_error = error = ETIMEDOUT;
					break;
				}
			}
			/* make sure socket thread is running, then wait */
			nfs_mount_sock_thread_wake(nmp);
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
				break;
			}
			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
			slpflag = 0;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (error) {
			lck_mtx_lock(&req->r_mtx);
			req->r_error = error;
			req->r_flags &= ~R_SENDING;
			lck_mtx_unlock(&req->r_mtx);
			return error;
		}
		/* socket came back; retry from the top (state may have changed) */
		goto again;
	}
	nso = nmp->nm_nso;
	/* note that we're using the mount's socket to do the send */
	nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
	lck_mtx_unlock(&nmp->nm_lock);
	if (!nso) {
		/* no socket right now: mark for resend */
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	lck_mtx_lock(&req->r_mtx);
	rexmit = (req->r_flags & R_SENT);

	if (sotype == SOCK_DGRAM) {
		lck_mtx_lock(&nmp->nm_lock);
		if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
			/* if we can't send this out yet, wait on the cwnd queue */
			slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			nfs_sndunlock(req);
			req->r_flags &= ~R_SENDING;
			req->r_flags |= R_MUSTRESEND;
			lck_mtx_unlock(&req->r_mtx);
			if (!wait) {
				req->r_rtt = 0;
				return 0;
			}
			lck_mtx_lock(&nmp->nm_lock);
			while (nmp->nm_sent >= nmp->nm_cwnd) {
				if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
					break;
				}
				TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
				msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
				slpflag = 0;
				/* remove ourselves from the queue if still on it */
				if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
					TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
					req->r_cchain.tqe_next = NFSREQNOLIST;
				}
			}
			lck_mtx_unlock(&nmp->nm_lock);
			goto again;
		}
		/*
		 * We update these *before* the send to avoid racing
		 * against others who may be looking to send requests.
		 */
		if (!rexmit) {
			/* first transmit */
			req->r_flags |= R_CWND;
			nmp->nm_sent += NFS_CWNDSCALE;
		} else {
			/*
			 * When retransmitting, turn timing off
			 * and divide congestion window by 2.
			 */
			req->r_flags &= ~R_TIMING;
			nmp->nm_cwnd >>= 1;
			if (nmp->nm_cwnd < NFS_CWNDSCALE) {
				nmp->nm_cwnd = NFS_CWNDSCALE;
			}
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	req->r_flags &= ~R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);

	/* send a copy so the original request mbufs survive for retransmits */
	error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
	    wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
	if (error) {
		if (wait) {
			log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
		}
		nfs_sndunlock(req);
		lck_mtx_lock(&req->r_mtx);
		req->r_flags &= ~R_SENDING;
		req->r_flags |= R_MUSTRESEND;
		req->r_rtt = 0;
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	bzero(&msg, sizeof(msg));
	/* unconnected datagram sockets need an explicit destination address */
	if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
		msg.msg_name = (caddr_t)sendnam;
		msg.msg_namelen = sendnam->sa_len;
	}
	NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
	error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
	if (error || (sentlen != req->r_mreqlen)) {
		NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
		    req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
	}

	if (!error && (sentlen != req->r_mreqlen)) {
		error = EWOULDBLOCK;
	}
	/* a partial send on a stream socket desynchronizes the record stream */
	needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));

	lck_mtx_lock(&req->r_mtx);
	req->r_flags &= ~R_SENDING;
	req->r_rtt = 0;
	if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
		req->r_rexmit = NFS_MAXREXMIT;
	}

	if (!error) {
		/* SUCCESS */
		req->r_flags &= ~R_RESENDERR;
		if (rexmit) {
			OSAddAtomic64(1, &nfsstats.rpcretries);
		}
		req->r_flags |= R_SENT;
		if (req->r_flags & R_WAITSENT) {
			req->r_flags &= ~R_WAITSENT;
			wakeup(req);
		}
		nfs_sndunlock(req);
		lck_mtx_unlock(&req->r_mtx);
		return 0;
	}

	/* send failed */
	req->r_flags |= R_MUSTRESEND;
	if (rexmit) {
		req->r_flags |= R_RESENDERR;
	}
	if ((error == EINTR) || (error == ERESTART)) {
		req->r_error = error;
	}
	lck_mtx_unlock(&req->r_mtx);

	if (sotype == SOCK_DGRAM) {
		/*
		 * Note: even though a first send may fail, we consider
		 * the request sent for congestion window purposes.
		 * So we don't need to undo any of the changes made above.
		 */
		/*
		 * Socket errors ignored for connectionless sockets??
		 * For now, ignore them all
		 */
		if ((error != EINTR) && (error != ERESTART) &&
		    (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
			/* read SO_ERROR just to clear any pending socket error */
			int clearerror = 0, optlen = sizeof(clearerror);
			sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
#ifdef NFS_SOCKET_DEBUGGING
			if (clearerror) {
				NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
				    error, clearerror);
			}
#endif
		}
	}

	/* check if it appears we should reconnect the socket */
	switch (error) {
	case EWOULDBLOCK:
		/* if send timed out, reconnect if on TCP */
		if (sotype != SOCK_STREAM) {
			break;
		}
	/* FALLTHROUGH */
	case EPIPE:
	case EADDRNOTAVAIL:
	case ENETDOWN:
	case ENETUNREACH:
	case ENETRESET:
	case ECONNABORTED:
	case ECONNRESET:
	case ENOTCONN:
	case ESHUTDOWN:
	case ECONNREFUSED:
	case EHOSTDOWN:
	case EHOSTUNREACH:
		/* case ECANCELED??? */
		needrecon = 1;
		break;
	}
	if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
		NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
		nfs_need_reconnect(nmp);
	}

	nfs_sndunlock(req);

	if (nfs_is_dead(error, nmp)) {
		error = EIO;
	}

	/*
	 * Don't log some errors:
	 * EPIPE errors may be common with servers that drop idle connections.
	 * EADDRNOTAVAIL may occur on network transitions.
	 * ENOTCONN may occur under some network conditions.
	 */
	if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
		error = 0;
	}
	if (error && (error != EINTR) && (error != ERESTART)) {
		log(LOG_INFO, "nfs send error %d for server %s\n", error,
		    !req->r_nmp ? "<unmounted>" :
		    vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
	}

	/* prefer request termination error over other errors */
	error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
	if (error2) {
		error = error2;
	}

	/* only allow the following errors to be returned */
	if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
	    (error != ENXIO) && (error != ETIMEDOUT)) {
		/*
		 * We got some error we don't know what to do with,
		 * i.e., we're not reconnecting, we map it to
		 * EIO. Presumably our send failed and we better tell
		 * the caller so they don't wait for a reply that is
		 * never going to come. If we are reconnecting we
		 * return 0 and the request will be resent.
		 */
		error = needrecon ? 0 : EIO;
	}
	return error;
}
3649
3650 /*
3651 * NFS client socket upcalls
3652 *
3653 * Pull RPC replies out of an NFS mount's socket and match them
3654 * up with the pending request.
3655 *
3656 * The datagram code is simple because we always get whole
3657 * messages out of the socket.
3658 *
3659 * The stream code is more involved because we have to parse
3660 * the RPC records out of the stream.
3661 */
3662
3663 /* NFS client UDP socket upcall */
3664 void
3665 nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3666 {
3667 struct nfsmount *nmp = arg;
3668 struct nfs_socket *nso = nmp->nm_nso;
3669 size_t rcvlen;
3670 mbuf_t m;
3671 int error = 0;
3672
3673 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3674 return;
3675 }
3676
3677 do {
3678 /* make sure we're on the current socket */
3679 if (!nso || (nso->nso_so != so)) {
3680 return;
3681 }
3682
3683 m = NULL;
3684 rcvlen = 1000000;
3685 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
3686 if (m) {
3687 nfs_request_match_reply(nmp, m);
3688 }
3689 } while (m && !error);
3690
3691 if (error && (error != EWOULDBLOCK)) {
3692 /* problems with the socket... mark for reconnection */
3693 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
3694 nfs_need_reconnect(nmp);
3695 }
3696 }
3697
/* NFS client TCP socket upcall */
/*
 * Parses RPC records out of the TCP stream and matches each completed
 * reply to its pending request.  NSO_UPCALL serializes upcalls on the
 * socket; the record-parsing state is copied out so parsing can proceed
 * without holding nso_lock, then copied back when done.
 */
void
nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
{
	struct nfsmount *nmp = arg;
	struct nfs_socket *nso = nmp->nm_nso;
	struct nfs_rpc_record_state nrrs;
	mbuf_t m;
	int error = 0;
	int recv = 1;
	int wup = 0;

	/* ignore upcalls that race with connection setup */
	if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
		return;
	}

	/* make sure we're on the current socket */
	lck_mtx_lock(&nmp->nm_lock);
	nso = nmp->nm_nso;
	if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
		lck_mtx_unlock(&nmp->nm_lock);
		return;
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* make sure this upcall should be trying to do work */
	lck_mtx_lock(&nso->nso_lock);
	if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
		lck_mtx_unlock(&nso->nso_lock);
		return;
	}
	nso->nso_flags |= NSO_UPCALL;
	/* take a local copy of the parsing state; restored below */
	nrrs = nso->nso_rrs;
	lck_mtx_unlock(&nso->nso_lock);

	/* loop while we make error-free progress */
	while (!error && recv) {
		error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
		if (m) { /* match completed response with request */
			nfs_request_match_reply(nmp, m);
		}
	}

	/* Update the sockets's rpc parsing state */
	lck_mtx_lock(&nso->nso_lock);
	nso->nso_rrs = nrrs;
	if (nso->nso_flags & NSO_DISCONNECTING) {
		/* a disconnect is waiting for this upcall to finish */
		wup = 1;
	}
	nso->nso_flags &= ~NSO_UPCALL;
	lck_mtx_unlock(&nso->nso_lock);
	if (wup) {
		wakeup(&nso->nso_flags);
	}

#ifdef NFS_SOCKET_DEBUGGING
	if (!recv && (error != EWOULDBLOCK)) {
		NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
	}
#endif
	/* note: no error and no data indicates server closed its end */
	if ((error != EWOULDBLOCK) && (error || !recv)) {
		/* problems with the socket... mark for reconnection */
		NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
		nfs_need_reconnect(nmp);
	}
}
3765
3766 /*
3767 * "poke" a socket to try to provoke any pending errors
3768 */
3769 void
3770 nfs_sock_poke(struct nfsmount *nmp)
3771 {
3772 struct iovec aio;
3773 struct msghdr msg;
3774 size_t len;
3775 int error = 0;
3776 int dummy;
3777
3778 lck_mtx_lock(&nmp->nm_lock);
3779 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
3780 !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
3781 /* Nothing to poke */
3782 nmp->nm_sockflags &= ~NMSOCK_POKE;
3783 wakeup(&nmp->nm_sockflags);
3784 lck_mtx_unlock(&nmp->nm_lock);
3785 return;
3786 }
3787 lck_mtx_unlock(&nmp->nm_lock);
3788 aio.iov_base = &dummy;
3789 aio.iov_len = 0;
3790 len = 0;
3791 bzero(&msg, sizeof(msg));
3792 msg.msg_iov = &aio;
3793 msg.msg_iovlen = 1;
3794 error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
3795 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
3796 lck_mtx_lock(&nmp->nm_lock);
3797 nmp->nm_sockflags &= ~NMSOCK_POKE;
3798 wakeup(&nmp->nm_sockflags);
3799 lck_mtx_unlock(&nmp->nm_lock);
3800 nfs_is_dead(error, nmp);
3801 }
3802
3803 /*
3804 * Match an RPC reply with the corresponding request
3805 */
3806 void
3807 nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
3808 {
3809 struct nfsreq *req;
3810 struct nfsm_chain nmrep;
3811 u_int32_t reply = 0, rxid = 0;
3812 int error = 0, asyncioq, t1;
3813
3814 /* Get the xid and check that it is an rpc reply */
3815 nfsm_chain_dissect_init(error, &nmrep, mrep);
3816 nfsm_chain_get_32(error, &nmrep, rxid);
3817 nfsm_chain_get_32(error, &nmrep, reply);
3818 if (error || (reply != RPC_REPLY)) {
3819 OSAddAtomic64(1, &nfsstats.rpcinvalid);
3820 mbuf_freem(mrep);
3821 return;
3822 }
3823
3824 /*
3825 * Loop through the request list to match up the reply
3826 * Iff no match, just drop it.
3827 */
3828 lck_mtx_lock(nfs_request_mutex);
3829 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
3830 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3831 continue;
3832 }
3833 /* looks like we have it, grab lock and double check */
3834 lck_mtx_lock(&req->r_mtx);
3835 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3836 lck_mtx_unlock(&req->r_mtx);
3837 continue;
3838 }
3839 /* Found it.. */
3840 req->r_nmrep = nmrep;
3841 lck_mtx_lock(&nmp->nm_lock);
3842 if (nmp->nm_sotype == SOCK_DGRAM) {
3843 /*
3844 * Update congestion window.
3845 * Do the additive increase of one rpc/rtt.
3846 */
3847 FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
3848 if (nmp->nm_cwnd <= nmp->nm_sent) {
3849 nmp->nm_cwnd +=
3850 ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
3851 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
3852 if (nmp->nm_cwnd > NFS_MAXCWND) {
3853 nmp->nm_cwnd = NFS_MAXCWND;
3854 }
3855 }
3856 if (req->r_flags & R_CWND) {
3857 nmp->nm_sent -= NFS_CWNDSCALE;
3858 req->r_flags &= ~R_CWND;
3859 }
3860 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
3861 /* congestion window is open, poke the cwnd queue */
3862 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
3863 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
3864 req2->r_cchain.tqe_next = NFSREQNOLIST;
3865 wakeup(req2);
3866 }
3867 }
3868 /*
3869 * Update rtt using a gain of 0.125 on the mean
3870 * and a gain of 0.25 on the deviation.
3871 */
3872 if (req->r_flags & R_TIMING) {
3873 /*
3874 * Since the timer resolution of
3875 * NFS_HZ is so course, it can often
3876 * result in r_rtt == 0. Since
3877 * r_rtt == N means that the actual
3878 * rtt is between N+dt and N+2-dt ticks,
3879 * add 1.
3880 */
3881 if (proct[req->r_procnum] == 0) {
3882 panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
3883 }
3884 t1 = req->r_rtt + 1;
3885 t1 -= (NFS_SRTT(req) >> 3);
3886 NFS_SRTT(req) += t1;
3887 if (t1 < 0) {
3888 t1 = -t1;
3889 }
3890 t1 -= (NFS_SDRTT(req) >> 2);
3891 NFS_SDRTT(req) += t1;
3892 }
3893 nmp->nm_timeouts = 0;
3894 lck_mtx_unlock(&nmp->nm_lock);
3895 /* signal anyone waiting on this request */
3896 wakeup(req);
3897 asyncioq = (req->r_callback.rcb_func != NULL);
3898 #if CONFIG_NFS_GSS
3899 if (nfs_request_using_gss(req)) {
3900 nfs_gss_clnt_rpcdone(req);
3901 }
3902 #endif /* CONFIG_NFS_GSS */
3903 lck_mtx_unlock(&req->r_mtx);
3904 lck_mtx_unlock(nfs_request_mutex);
3905 /* if it's an async RPC with a callback, queue it up */
3906 if (asyncioq) {
3907 nfs_asyncio_finish(req);
3908 }
3909 break;
3910 }
3911
3912 if (!req) {
3913 /* not matched to a request, so drop it. */
3914 lck_mtx_unlock(nfs_request_mutex);
3915 OSAddAtomic64(1, &nfsstats.rpcunexpected);
3916 mbuf_freem(mrep);
3917 }
3918 }
3919
3920 /*
3921 * Wait for the reply for a given request...
3922 * ...potentially resending the request if necessary.
3923 */
3924 int
3925 nfs_wait_reply(struct nfsreq *req)
3926 {
3927 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3928 int error = 0, slpflag, first = 1;
3929
3930 if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
3931 slpflag = PCATCH;
3932 } else {
3933 slpflag = 0;
3934 }
3935
3936 lck_mtx_lock(&req->r_mtx);
3937 while (!req->r_nmrep.nmc_mhead) {
3938 if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
3939 break;
3940 }
3941 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
3942 break;
3943 }
3944 /* check if we need to resend */
3945 if (req->r_flags & R_MUSTRESEND) {
3946 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3947 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
3948 req->r_flags |= R_SENDING;
3949 lck_mtx_unlock(&req->r_mtx);
3950 if (nfs_request_using_gss(req)) {
3951 /*
3952 * It's an RPCSEC_GSS request.
3953 * Can't just resend the original request
3954 * without bumping the cred sequence number.
3955 * Go back and re-build the request.
3956 */
3957 lck_mtx_lock(&req->r_mtx);
3958 req->r_flags &= ~R_SENDING;
3959 lck_mtx_unlock(&req->r_mtx);
3960 return EAGAIN;
3961 }
3962 error = nfs_send(req, 1);
3963 lck_mtx_lock(&req->r_mtx);
3964 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
3965 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
3966 if (error) {
3967 break;
3968 }
3969 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
3970 break;
3971 }
3972 }
3973 /* need to poll if we're P_NOREMOTEHANG */
3974 if (nfs_noremotehang(req->r_thread)) {
3975 ts.tv_sec = 1;
3976 }
3977 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
3978 first = slpflag = 0;
3979 }
3980 lck_mtx_unlock(&req->r_mtx);
3981
3982 return error;
3983 }
3984
3985 /*
3986 * An NFS request goes something like this:
3987 * (nb: always frees up mreq mbuf list)
3988 * nfs_request_create()
3989 * - allocates a request struct if one is not provided
3990 * - initial fill-in of the request struct
3991 * nfs_request_add_header()
3992 * - add the RPC header
3993 * nfs_request_send()
3994 * - link it into list
3995 * - call nfs_send() for first transmit
3996 * nfs_request_wait()
3997 * - call nfs_wait_reply() to wait for the reply
3998 * nfs_request_finish()
3999 * - break down rpc header and return with error or nfs reply
4000 * pointed to by nmrep.
4001 * nfs_request_rele()
4002 * nfs_request_destroy()
4003 * - clean up the request struct
4004 * - free the request struct if it was allocated by nfs_request_create()
4005 */
4006
4007 /*
4008 * Set up an NFS request struct (allocating if no request passed in).
4009 */
4010 int
4011 nfs_request_create(
4012 nfsnode_t np,
4013 mount_t mp, /* used only if !np */
4014 struct nfsm_chain *nmrest,
4015 int procnum,
4016 thread_t thd,
4017 kauth_cred_t cred,
4018 struct nfsreq **reqp)
4019 {
4020 struct nfsreq *req, *newreq = NULL;
4021 struct nfsmount *nmp;
4022
4023 req = *reqp;
4024 if (!req) {
4025 /* allocate a new NFS request structure */
4026 MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK);
4027 if (!newreq) {
4028 mbuf_freem(nmrest->nmc_mhead);
4029 nmrest->nmc_mhead = NULL;
4030 return ENOMEM;
4031 }
4032 req = newreq;
4033 }
4034
4035 bzero(req, sizeof(*req));
4036 if (req == newreq) {
4037 req->r_flags = R_ALLOCATED;
4038 }
4039
4040 nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
4041 if (nfs_mount_gone(nmp)) {
4042 if (newreq) {
4043 FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
4044 }
4045 return ENXIO;
4046 }
4047 lck_mtx_lock(&nmp->nm_lock);
4048 if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
4049 (nmp->nm_state & NFSSTA_TIMEO)) {
4050 lck_mtx_unlock(&nmp->nm_lock);
4051 mbuf_freem(nmrest->nmc_mhead);
4052 nmrest->nmc_mhead = NULL;
4053 if (newreq) {
4054 FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ);
4055 }
4056 return ENXIO;
4057 }
4058
4059 if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
4060 OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
4061 }
4062 if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) {
4063 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
4064 }
4065
4066 lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL);
4067 req->r_nmp = nmp;
4068 nmp->nm_ref++;
4069 req->r_np = np;
4070 req->r_thread = thd;
4071 if (!thd) {
4072 req->r_flags |= R_NOINTR;
4073 }
4074 if (IS_VALID_CRED(cred)) {
4075 kauth_cred_ref(cred);
4076 req->r_cred = cred;
4077 }
4078 req->r_procnum = procnum;
4079 if (proct[procnum] > 0) {
4080 req->r_flags |= R_TIMING;
4081 }
4082 req->r_nmrep.nmc_mhead = NULL;
4083 SLIST_INIT(&req->r_gss_seqlist);
4084 req->r_achain.tqe_next = NFSREQNOLIST;
4085 req->r_rchain.tqe_next = NFSREQNOLIST;
4086 req->r_cchain.tqe_next = NFSREQNOLIST;
4087
4088 /* set auth flavor to use for request */
4089 if (!req->r_cred) {
4090 req->r_auth = RPCAUTH_NONE;
4091 } else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
4092 req->r_auth = req->r_np->n_auth;
4093 } else {
4094 req->r_auth = nmp->nm_auth;
4095 }
4096
4097 lck_mtx_unlock(&nmp->nm_lock);
4098
4099 /* move the request mbuf chain to the nfsreq */
4100 req->r_mrest = nmrest->nmc_mhead;
4101 nmrest->nmc_mhead = NULL;
4102
4103 req->r_flags |= R_INITTED;
4104 req->r_refs = 1;
4105 if (newreq) {
4106 *reqp = req;
4107 }
4108 return 0;
4109 }
4110
4111 /*
4112 * Clean up and free an NFS request structure.
4113 */
4114 void
4115 nfs_request_destroy(struct nfsreq *req)
4116 {
4117 struct nfsmount *nmp;
4118 int clearjbtimeo = 0;
4119
4120 #if CONFIG_NFS_GSS
4121 struct gss_seq *gsp, *ngsp;
4122 #endif
4123
4124 if (!req || !(req->r_flags & R_INITTED)) {
4125 return;
4126 }
4127 nmp = req->r_nmp;
4128 req->r_flags &= ~R_INITTED;
4129 if (req->r_lflags & RL_QUEUED) {
4130 nfs_reqdequeue(req);
4131 }
4132
4133 if (req->r_achain.tqe_next != NFSREQNOLIST) {
4134 /*
4135 * Still on an async I/O queue?
4136 * %%% But which one, we may be on a local iod.
4137 */
4138 lck_mtx_lock(nfsiod_mutex);
4139 if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
4140 TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
4141 req->r_achain.tqe_next = NFSREQNOLIST;
4142 }
4143 lck_mtx_unlock(nfsiod_mutex);
4144 }
4145
4146 lck_mtx_lock(&req->r_mtx);
4147 if (nmp) {
4148 lck_mtx_lock(&nmp->nm_lock);
4149 if (req->r_flags & R_CWND) {
4150 /* Decrement the outstanding request count. */
4151 req->r_flags &= ~R_CWND;
4152 nmp->nm_sent -= NFS_CWNDSCALE;
4153 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4154 /* congestion window is open, poke the cwnd queue */
4155 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4156 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4157 req2->r_cchain.tqe_next = NFSREQNOLIST;
4158 wakeup(req2);
4159 }
4160 }
4161 assert((req->r_flags & R_RESENDQ) == 0);
4162 /* XXX should we just remove this conditional, we should have a reference if we're resending */
4163 if (req->r_rchain.tqe_next != NFSREQNOLIST) {
4164 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4165 req->r_rchain.tqe_next = NFSREQNOLIST;
4166 if (req->r_flags & R_RESENDQ) {
4167 req->r_flags &= ~R_RESENDQ;
4168 }
4169 }
4170 if (req->r_cchain.tqe_next != NFSREQNOLIST) {
4171 TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
4172 req->r_cchain.tqe_next = NFSREQNOLIST;
4173 }
4174 if (req->r_flags & R_JBTPRINTFMSG) {
4175 req->r_flags &= ~R_JBTPRINTFMSG;
4176 nmp->nm_jbreqs--;
4177 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4178 }
4179 lck_mtx_unlock(&nmp->nm_lock);
4180 }
4181 lck_mtx_unlock(&req->r_mtx);
4182
4183 if (clearjbtimeo) {
4184 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4185 }
4186 if (req->r_mhead) {
4187 mbuf_freem(req->r_mhead);
4188 } else if (req->r_mrest) {
4189 mbuf_freem(req->r_mrest);
4190 }
4191 if (req->r_nmrep.nmc_mhead) {
4192 mbuf_freem(req->r_nmrep.nmc_mhead);
4193 }
4194 if (IS_VALID_CRED(req->r_cred)) {
4195 kauth_cred_unref(&req->r_cred);
4196 }
4197 #if CONFIG_NFS_GSS
4198 if (nfs_request_using_gss(req)) {
4199 nfs_gss_clnt_rpcdone(req);
4200 }
4201 SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
4202 FREE(gsp, M_TEMP);
4203 if (req->r_gss_ctx) {
4204 nfs_gss_clnt_ctx_unref(req);
4205 }
4206 #endif /* CONFIG_NFS_GSS */
4207 if (req->r_wrongsec) {
4208 FREE(req->r_wrongsec, M_TEMP);
4209 }
4210 if (nmp) {
4211 nfs_mount_rele(nmp);
4212 }
4213 lck_mtx_destroy(&req->r_mtx, nfs_request_grp);
4214 if (req->r_flags & R_ALLOCATED) {
4215 FREE_ZONE(req, sizeof(*req), M_NFSREQ);
4216 }
4217 }
4218
4219 void
4220 nfs_request_ref(struct nfsreq *req, int locked)
4221 {
4222 if (!locked) {
4223 lck_mtx_lock(&req->r_mtx);
4224 }
4225 if (req->r_refs <= 0) {
4226 panic("nfsreq reference error");
4227 }
4228 req->r_refs++;
4229 if (!locked) {
4230 lck_mtx_unlock(&req->r_mtx);
4231 }
4232 }
4233
4234 void
4235 nfs_request_rele(struct nfsreq *req)
4236 {
4237 int destroy;
4238
4239 lck_mtx_lock(&req->r_mtx);
4240 if (req->r_refs <= 0) {
4241 panic("nfsreq reference underflow");
4242 }
4243 req->r_refs--;
4244 destroy = (req->r_refs == 0);
4245 lck_mtx_unlock(&req->r_mtx);
4246 if (destroy) {
4247 nfs_request_destroy(req);
4248 }
4249 }
4250
4251
4252 /*
4253 * Add an (updated) RPC header with authorization to an NFS request.
4254 */
4255 int
4256 nfs_request_add_header(struct nfsreq *req)
4257 {
4258 struct nfsmount *nmp;
4259 int error = 0;
4260 mbuf_t m;
4261
4262 /* free up any previous header */
4263 if ((m = req->r_mhead)) {
4264 while (m && (m != req->r_mrest)) {
4265 m = mbuf_free(m);
4266 }
4267 req->r_mhead = NULL;
4268 }
4269
4270 nmp = req->r_nmp;
4271 if (nfs_mount_gone(nmp)) {
4272 return ENXIO;
4273 }
4274
4275 error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
4276 if (error) {
4277 return error;
4278 }
4279
4280 req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
4281 nmp = req->r_nmp;
4282 if (nfs_mount_gone(nmp)) {
4283 return ENXIO;
4284 }
4285 lck_mtx_lock(&nmp->nm_lock);
4286 if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
4287 req->r_retry = nmp->nm_retry;
4288 } else {
4289 req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
4290 }
4291 lck_mtx_unlock(&nmp->nm_lock);
4292
4293 return error;
4294 }
4295
4296
4297 /*
4298 * Queue an NFS request up and send it out.
4299 */
4300 int
4301 nfs_request_send(struct nfsreq *req, int wait)
4302 {
4303 struct nfsmount *nmp;
4304 struct timeval now;
4305
4306 lck_mtx_lock(&req->r_mtx);
4307 req->r_flags |= R_SENDING;
4308 lck_mtx_unlock(&req->r_mtx);
4309
4310 lck_mtx_lock(nfs_request_mutex);
4311
4312 nmp = req->r_nmp;
4313 if (nfs_mount_gone(nmp)) {
4314 lck_mtx_unlock(nfs_request_mutex);
4315 return ENXIO;
4316 }
4317
4318 microuptime(&now);
4319 if (!req->r_start) {
4320 req->r_start = now.tv_sec;
4321 req->r_lastmsg = now.tv_sec -
4322 ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
4323 }
4324
4325 OSAddAtomic64(1, &nfsstats.rpcrequests);
4326
4327 /*
4328 * Chain request into list of outstanding requests. Be sure
4329 * to put it LAST so timer finds oldest requests first.
4330 * Make sure that the request queue timer is running
4331 * to check for possible request timeout.
4332 */
4333 TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
4334 req->r_lflags |= RL_QUEUED;
4335 if (!nfs_request_timer_on) {
4336 nfs_request_timer_on = 1;
4337 nfs_interval_timer_start(nfs_request_timer_call,
4338 NFS_REQUESTDELAY);
4339 }
4340 lck_mtx_unlock(nfs_request_mutex);
4341
4342 /* Send the request... */
4343 return nfs_send(req, wait);
4344 }
4345
4346 /*
4347 * Call nfs_wait_reply() to wait for the reply.
4348 */
4349 void
4350 nfs_request_wait(struct nfsreq *req)
4351 {
4352 req->r_error = nfs_wait_reply(req);
4353 }
4354
4355 /*
4356 * Finish up an NFS request by dequeueing it and
4357 * doing the initial NFS request reply processing.
4358 */
4359 int
4360 nfs_request_finish(
4361 struct nfsreq *req,
4362 struct nfsm_chain *nmrepp,
4363 int *status)
4364 {
4365 struct nfsmount *nmp;
4366 mbuf_t mrep;
4367 int verf_type = 0;
4368 uint32_t verf_len = 0;
4369 uint32_t reply_status = 0;
4370 uint32_t rejected_status = 0;
4371 uint32_t auth_status = 0;
4372 uint32_t accepted_status = 0;
4373 struct nfsm_chain nmrep;
4374 int error, clearjbtimeo;
4375
4376 error = req->r_error;
4377
4378 if (nmrepp) {
4379 nmrepp->nmc_mhead = NULL;
4380 }
4381
4382 /* RPC done, unlink the request. */
4383 nfs_reqdequeue(req);
4384
4385 mrep = req->r_nmrep.nmc_mhead;
4386
4387 nmp = req->r_nmp;
4388
4389 if ((req->r_flags & R_CWND) && nmp) {
4390 /*
4391 * Decrement the outstanding request count.
4392 */
4393 req->r_flags &= ~R_CWND;
4394 lck_mtx_lock(&nmp->nm_lock);
4395 FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
4396 nmp->nm_sent -= NFS_CWNDSCALE;
4397 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4398 /* congestion window is open, poke the cwnd queue */
4399 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4400 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4401 req2->r_cchain.tqe_next = NFSREQNOLIST;
4402 wakeup(req2);
4403 }
4404 lck_mtx_unlock(&nmp->nm_lock);
4405 }
4406
4407 #if CONFIG_NFS_GSS
4408 if (nfs_request_using_gss(req)) {
4409 /*
4410 * If the request used an RPCSEC_GSS credential
4411 * then reset its sequence number bit in the
4412 * request window.
4413 */
4414 nfs_gss_clnt_rpcdone(req);
4415
4416 /*
4417 * If we need to re-send, go back and re-build the
4418 * request based on a new sequence number.
4419 * Note that we're using the original XID.
4420 */
4421 if (error == EAGAIN) {
4422 req->r_error = 0;
4423 if (mrep) {
4424 mbuf_freem(mrep);
4425 }
4426 error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
4427 req->r_nmrep.nmc_mhead = NULL;
4428 req->r_flags |= R_RESTART;
4429 if (error == ENEEDAUTH) {
4430 req->r_xid = 0; // get a new XID
4431 error = 0;
4432 }
4433 goto nfsmout;
4434 }
4435 }
4436 #endif /* CONFIG_NFS_GSS */
4437
4438 /*
4439 * If there was a successful reply, make sure to mark the mount as up.
4440 * If a tprintf message was given (or if this is a timed-out soft mount)
4441 * then post a tprintf message indicating the server is alive again.
4442 */
4443 if (!error) {
4444 if ((req->r_flags & R_TPRINTFMSG) ||
4445 (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
4446 ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
4447 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
4448 } else {
4449 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
4450 }
4451 }
4452 if (!error && !nmp) {
4453 error = ENXIO;
4454 }
4455 nfsmout_if(error);
4456
4457 /*
4458 * break down the RPC header and check if ok
4459 */
4460 nmrep = req->r_nmrep;
4461 nfsm_chain_get_32(error, &nmrep, reply_status);
4462 nfsmout_if(error);
4463 if (reply_status == RPC_MSGDENIED) {
4464 nfsm_chain_get_32(error, &nmrep, rejected_status);
4465 nfsmout_if(error);
4466 if (rejected_status == RPC_MISMATCH) {
4467 error = ENOTSUP;
4468 goto nfsmout;
4469 }
4470 nfsm_chain_get_32(error, &nmrep, auth_status);
4471 nfsmout_if(error);
4472 switch (auth_status) {
4473 #if CONFIG_NFS_GSS
4474 case RPCSEC_GSS_CREDPROBLEM:
4475 case RPCSEC_GSS_CTXPROBLEM:
4476 /*
4477 * An RPCSEC_GSS cred or context problem.
4478 * We can't use it anymore.
4479 * Restore the args, renew the context
4480 * and set up for a resend.
4481 */
4482 error = nfs_gss_clnt_args_restore(req);
4483 if (error && error != ENEEDAUTH) {
4484 break;
4485 }
4486
4487 if (!error) {
4488 error = nfs_gss_clnt_ctx_renew(req);
4489 if (error) {
4490 break;
4491 }
4492 }
4493 mbuf_freem(mrep);
4494 req->r_nmrep.nmc_mhead = NULL;
4495 req->r_xid = 0; // get a new XID
4496 req->r_flags |= R_RESTART;
4497 goto nfsmout;
4498 #endif /* CONFIG_NFS_GSS */
4499 default:
4500 error = EACCES;
4501 break;
4502 }
4503 goto nfsmout;
4504 }
4505
4506 /* Now check the verifier */
4507 nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
4508 nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
4509 nfsmout_if(error);
4510
4511 switch (req->r_auth) {
4512 case RPCAUTH_NONE:
4513 case RPCAUTH_SYS:
4514 /* Any AUTH_SYS verifier is ignored */
4515 if (verf_len > 0) {
4516 nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
4517 }
4518 nfsm_chain_get_32(error, &nmrep, accepted_status);
4519 break;
4520 #if CONFIG_NFS_GSS
4521 case RPCAUTH_KRB5:
4522 case RPCAUTH_KRB5I:
4523 case RPCAUTH_KRB5P:
4524 error = nfs_gss_clnt_verf_get(req, &nmrep,
4525 verf_type, verf_len, &accepted_status);
4526 break;
4527 #endif /* CONFIG_NFS_GSS */
4528 }
4529 nfsmout_if(error);
4530
4531 switch (accepted_status) {
4532 case RPC_SUCCESS:
4533 if (req->r_procnum == NFSPROC_NULL) {
4534 /*
4535 * The NFS null procedure is unique,
4536 * in not returning an NFS status.
4537 */
4538 *status = NFS_OK;
4539 } else {
4540 nfsm_chain_get_32(error, &nmrep, *status);
4541 nfsmout_if(error);
4542 }
4543
4544 if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
4545 /*
4546 * It's a JUKEBOX error - delay and try again
4547 */
4548 int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
4549
4550 mbuf_freem(mrep);
4551 req->r_nmrep.nmc_mhead = NULL;
4552 if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
4553 /* we're not yet completely mounted and */
4554 /* we can't complete an RPC, so we fail */
4555 OSAddAtomic64(1, &nfsstats.rpctimeouts);
4556 nfs_softterm(req);
4557 error = req->r_error;
4558 goto nfsmout;
4559 }
4560 req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
4561 if (req->r_delay > 30) {
4562 req->r_delay = 30;
4563 }
4564 if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
4565 if (!(req->r_flags & R_JBTPRINTFMSG)) {
4566 req->r_flags |= R_JBTPRINTFMSG;
4567 lck_mtx_lock(&nmp->nm_lock);
4568 nmp->nm_jbreqs++;
4569 lck_mtx_unlock(&nmp->nm_lock);
4570 }
4571 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
4572 "resource temporarily unavailable (jukebox)", 0);
4573 }
4574 if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
4575 !(req->r_flags & R_NOINTR)) {
4576 /* for soft mounts, just give up after a short while */
4577 OSAddAtomic64(1, &nfsstats.rpctimeouts);
4578 nfs_softterm(req);
4579 error = req->r_error;
4580 goto nfsmout;
4581 }
4582 delay = req->r_delay;
4583 if (req->r_callback.rcb_func) {
4584 struct timeval now;
4585 microuptime(&now);
4586 req->r_resendtime = now.tv_sec + delay;
4587 } else {
4588 do {
4589 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
4590 goto nfsmout;
4591 }
4592 tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
4593 slpflag = 0;
4594 } while (--delay > 0);
4595 }
4596 req->r_xid = 0; // get a new XID
4597 req->r_flags |= R_RESTART;
4598 req->r_start = 0;
4599 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
4600 return 0;
4601 }
4602
4603 if (req->r_flags & R_JBTPRINTFMSG) {
4604 req->r_flags &= ~R_JBTPRINTFMSG;
4605 lck_mtx_lock(&nmp->nm_lock);
4606 nmp->nm_jbreqs--;
4607 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4608 lck_mtx_unlock(&nmp->nm_lock);
4609 nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
4610 }
4611
4612 #if CONFIG_NFS4
4613 if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
4614 /*
4615 * Hmmm... we need to try a different security flavor.
4616 * The first time a request hits this, we will allocate an array
4617 * to track flavors to try. We fill the array with the mount's
4618 * preferred flavors or the server's preferred flavors or just the
4619 * flavors we support.
4620 */
4621 uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
4622 int srvcount, i, j;
4623
4624 /* Call SECINFO to try to get list of flavors from server. */
4625 srvcount = NX_MAX_SEC_FLAVORS;
4626 nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);
4627
4628 if (!req->r_wrongsec) {
4629 /* first time... set up flavor array */
4630 MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK);
4631 if (!req->r_wrongsec) {
4632 error = EACCES;
4633 goto nfsmout;
4634 }
4635 i = 0;
4636 if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
4637 for (; i < nmp->nm_sec.count; i++) {
4638 req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
4639 }
4640 } else if (srvcount) { /* otherwise use the server's list of flavors */
4641 for (; i < srvcount; i++) {
4642 req->r_wrongsec[i] = srvflavors[i];
4643 }
4644 } else { /* otherwise, just try the flavors we support. */
4645 req->r_wrongsec[i++] = RPCAUTH_KRB5P;
4646 req->r_wrongsec[i++] = RPCAUTH_KRB5I;
4647 req->r_wrongsec[i++] = RPCAUTH_KRB5;
4648 req->r_wrongsec[i++] = RPCAUTH_SYS;
4649 req->r_wrongsec[i++] = RPCAUTH_NONE;
4650 }
4651 for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
4652 req->r_wrongsec[i] = RPCAUTH_INVALID;
4653 }
4654 }
4655
4656 /* clear the current flavor from the list */
4657 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4658 if (req->r_wrongsec[i] == req->r_auth) {
4659 req->r_wrongsec[i] = RPCAUTH_INVALID;
4660 }
4661 }
4662
4663 /* find the next flavor to try */
4664 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4665 if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
4666 if (!srvcount) { /* no server list, just try it */
4667 break;
4668 }
4669 /* check that it's in the server's list */
4670 for (j = 0; j < srvcount; j++) {
4671 if (req->r_wrongsec[i] == srvflavors[j]) {
4672 break;
4673 }
4674 }
4675 if (j < srvcount) { /* found */
4676 break;
4677 }
4678 /* not found in server list */
4679 req->r_wrongsec[i] = RPCAUTH_INVALID;
4680 }
4681 }
4682 if (i == NX_MAX_SEC_FLAVORS) {
4683 /* nothing left to try! */
4684 error = EACCES;
4685 goto nfsmout;
4686 }
4687
4688 /* retry with the next auth flavor */
4689 req->r_auth = req->r_wrongsec[i];
4690 req->r_xid = 0; // get a new XID
4691 req->r_flags |= R_RESTART;
4692 req->r_start = 0;
4693 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
4694 return 0;
4695 }
4696 if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
4697 /*
4698 * We renegotiated security for this request; so update the
4699 * default security flavor for the associated node.
4700 */
4701 if (req->r_np) {
4702 req->r_np->n_auth = req->r_auth;
4703 }
4704 }
4705 #endif /* CONFIG_NFS4 */
4706 if (*status == NFS_OK) {
4707 /*
4708 * Successful NFS request
4709 */
4710 *nmrepp = nmrep;
4711 req->r_nmrep.nmc_mhead = NULL;
4712 break;
4713 }
4714 /* Got an NFS error of some kind */
4715
4716 /*
4717 * If the File Handle was stale, invalidate the
4718 * lookup cache, just in case.
4719 */
4720 if ((*status == ESTALE) && req->r_np) {
4721 cache_purge(NFSTOV(req->r_np));
4722 /* if monitored, also send delete event */
4723 if (vnode_ismonitored(NFSTOV(req->r_np))) {
4724 nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
4725 }
4726 }
4727 if (nmp->nm_vers == NFS_VER2) {
4728 mbuf_freem(mrep);
4729 } else {
4730 *nmrepp = nmrep;
4731 }
4732 req->r_nmrep.nmc_mhead = NULL;
4733 error = 0;
4734 break;
4735 case RPC_PROGUNAVAIL:
4736 error = EPROGUNAVAIL;
4737 break;
4738 case RPC_PROGMISMATCH:
4739 error = ERPCMISMATCH;
4740 break;
4741 case RPC_PROCUNAVAIL:
4742 error = EPROCUNAVAIL;
4743 break;
4744 case RPC_GARBAGE:
4745 error = EBADRPC;
4746 break;
4747 case RPC_SYSTEM_ERR:
4748 default:
4749 error = EIO;
4750 break;
4751 }
4752 nfsmout:
4753 if (req->r_flags & R_JBTPRINTFMSG) {
4754 req->r_flags &= ~R_JBTPRINTFMSG;
4755 lck_mtx_lock(&nmp->nm_lock);
4756 nmp->nm_jbreqs--;
4757 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4758 lck_mtx_unlock(&nmp->nm_lock);
4759 if (clearjbtimeo) {
4760 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4761 }
4762 }
4763 FSDBG(273, R_XID32(req->r_xid), nmp, req,
4764 (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
4765 return error;
4766 }
4767
4768 /*
4769 * NFS request using a GSS/Kerberos security flavor?
4770 */
4771 int
4772 nfs_request_using_gss(struct nfsreq *req)
4773 {
4774 if (!req->r_gss_ctx) {
4775 return 0;
4776 }
4777 switch (req->r_auth) {
4778 case RPCAUTH_KRB5:
4779 case RPCAUTH_KRB5I:
4780 case RPCAUTH_KRB5P:
4781 return 1;
4782 }
4783 return 0;
4784 }
4785
4786 /*
4787 * Perform an NFS request synchronously.
4788 */
4789
4790 int
4791 nfs_request(
4792 nfsnode_t np,
4793 mount_t mp, /* used only if !np */
4794 struct nfsm_chain *nmrest,
4795 int procnum,
4796 vfs_context_t ctx,
4797 struct nfsreq_secinfo_args *si,
4798 struct nfsm_chain *nmrepp,
4799 u_int64_t *xidp,
4800 int *status)
4801 {
4802 return nfs_request2(np, mp, nmrest, procnum,
4803 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4804 si, 0, nmrepp, xidp, status);
4805 }
4806
4807 int
4808 nfs_request2(
4809 nfsnode_t np,
4810 mount_t mp, /* used only if !np */
4811 struct nfsm_chain *nmrest,
4812 int procnum,
4813 thread_t thd,
4814 kauth_cred_t cred,
4815 struct nfsreq_secinfo_args *si,
4816 int flags,
4817 struct nfsm_chain *nmrepp,
4818 u_int64_t *xidp,
4819 int *status)
4820 {
4821 struct nfsreq rq, *req = &rq;
4822 int error;
4823
4824 if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
4825 return error;
4826 }
4827 req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
4828 if (si) {
4829 req->r_secinfo = *si;
4830 }
4831
4832 FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
4833 do {
4834 req->r_error = 0;
4835 req->r_flags &= ~R_RESTART;
4836 if ((error = nfs_request_add_header(req))) {
4837 break;
4838 }
4839 if (xidp) {
4840 *xidp = req->r_xid;
4841 }
4842 if ((error = nfs_request_send(req, 1))) {
4843 break;
4844 }
4845 nfs_request_wait(req);
4846 if ((error = nfs_request_finish(req, nmrepp, status))) {
4847 break;
4848 }
4849 } while (req->r_flags & R_RESTART);
4850
4851 FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
4852 nfs_request_rele(req);
4853 return error;
4854 }
4855
4856
4857 #if CONFIG_NFS_GSS
4858 /*
4859 * Set up a new null proc request to exchange GSS context tokens with the
4860 * server. Associate the context that we are setting up with the request that we
4861 * are sending.
4862 */
4863
4864 int
4865 nfs_request_gss(
4866 mount_t mp,
4867 struct nfsm_chain *nmrest,
4868 thread_t thd,
4869 kauth_cred_t cred,
4870 int flags,
4871 struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
4872 struct nfsm_chain *nmrepp,
4873 int *status)
4874 {
4875 struct nfsreq rq, *req = &rq;
4876 int error, wait = 1;
4877
4878 if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
4879 return error;
4880 }
4881 req->r_flags |= (flags & R_OPTMASK);
4882
4883 if (cp == NULL) {
4884 printf("nfs_request_gss request has no context\n");
4885 nfs_request_rele(req);
4886 return NFSERR_EAUTH;
4887 }
4888 nfs_gss_clnt_ctx_ref(req, cp);
4889
4890 /*
4891 * Don't wait for a reply to a context destroy advisory
4892 * to avoid hanging on a dead server.
4893 */
4894 if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
4895 wait = 0;
4896 }
4897
4898 FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
4899 do {
4900 req->r_error = 0;
4901 req->r_flags &= ~R_RESTART;
4902 if ((error = nfs_request_add_header(req))) {
4903 break;
4904 }
4905
4906 if ((error = nfs_request_send(req, wait))) {
4907 break;
4908 }
4909 if (!wait) {
4910 break;
4911 }
4912
4913 nfs_request_wait(req);
4914 if ((error = nfs_request_finish(req, nmrepp, status))) {
4915 break;
4916 }
4917 } while (req->r_flags & R_RESTART);
4918
4919 FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
4920
4921 nfs_gss_clnt_ctx_unref(req);
4922 nfs_request_rele(req);
4923
4924 return error;
4925 }
4926 #endif /* CONFIG_NFS_GSS */
4927
4928 /*
4929 * Create and start an asynchronous NFS request.
4930 */
4931 int
4932 nfs_request_async(
4933 nfsnode_t np,
4934 mount_t mp, /* used only if !np */
4935 struct nfsm_chain *nmrest,
4936 int procnum,
4937 thread_t thd,
4938 kauth_cred_t cred,
4939 struct nfsreq_secinfo_args *si,
4940 int flags,
4941 struct nfsreq_cbinfo *cb,
4942 struct nfsreq **reqp)
4943 {
4944 struct nfsreq *req;
4945 struct nfsmount *nmp;
4946 int error, sent;
4947
4948 error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
4949 req = *reqp;
4950 FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
4951 if (error) {
4952 return error;
4953 }
4954 req->r_flags |= (flags & R_OPTMASK);
4955 req->r_flags |= R_ASYNC;
4956 if (si) {
4957 req->r_secinfo = *si;
4958 }
4959 if (cb) {
4960 req->r_callback = *cb;
4961 }
4962 error = nfs_request_add_header(req);
4963 if (!error) {
4964 req->r_flags |= R_WAITSENT;
4965 if (req->r_callback.rcb_func) {
4966 nfs_request_ref(req, 0);
4967 }
4968 error = nfs_request_send(req, 1);
4969 lck_mtx_lock(&req->r_mtx);
4970 if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
4971 /* make sure to wait until this async I/O request gets sent */
4972 int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
4973 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
4974 while (!(req->r_flags & R_SENT)) {
4975 nmp = req->r_nmp;
4976 if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
4977 lck_mtx_lock(&nmp->nm_lock);
4978 if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
4979 /*
4980 * It's not going to get off the resend queue if we're in recovery.
4981 * So, just take it off ourselves. We could be holding mount state
4982 * busy and thus holding up the start of recovery.
4983 */
4984 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4985 req->r_rchain.tqe_next = NFSREQNOLIST;
4986 if (req->r_flags & R_RESENDQ) {
4987 req->r_flags &= ~R_RESENDQ;
4988 }
4989 lck_mtx_unlock(&nmp->nm_lock);
4990 req->r_flags |= R_SENDING;
4991 lck_mtx_unlock(&req->r_mtx);
4992 error = nfs_send(req, 1);
4993 /* Remove the R_RESENDQ reference */
4994 nfs_request_rele(req);
4995 lck_mtx_lock(&req->r_mtx);
4996 if (error) {
4997 break;
4998 }
4999 continue;
5000 }
5001 lck_mtx_unlock(&nmp->nm_lock);
5002 }
5003 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5004 break;
5005 }
5006 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
5007 slpflag = 0;
5008 }
5009 }
5010 sent = req->r_flags & R_SENT;
5011 lck_mtx_unlock(&req->r_mtx);
5012 if (error && req->r_callback.rcb_func && !sent) {
5013 nfs_request_rele(req);
5014 }
5015 }
5016 FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
5017 if (error || req->r_callback.rcb_func) {
5018 nfs_request_rele(req);
5019 }
5020
5021 return error;
5022 }
5023
5024 /*
5025 * Wait for and finish an asynchronous NFS request.
5026 */
5027 int
5028 nfs_request_async_finish(
5029 struct nfsreq *req,
5030 struct nfsm_chain *nmrepp,
5031 u_int64_t *xidp,
5032 int *status)
5033 {
5034 int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
5035 struct nfsmount *nmp;
5036
5037 lck_mtx_lock(&req->r_mtx);
5038 if (!asyncio) {
5039 req->r_flags |= R_ASYNCWAIT;
5040 }
5041 while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */
5042 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
5043
5044 if ((nmp = req->r_nmp)) {
5045 lck_mtx_lock(&nmp->nm_lock);
5046 if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
5047 /*
5048 * It's not going to get off the resend queue if we're in recovery.
5049 * So, just take it off ourselves. We could be holding mount state
5050 * busy and thus holding up the start of recovery.
5051 */
5052 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
5053 req->r_rchain.tqe_next = NFSREQNOLIST;
5054 if (req->r_flags & R_RESENDQ) {
5055 req->r_flags &= ~R_RESENDQ;
5056 }
5057 /* Remove the R_RESENDQ reference */
5058 assert(req->r_refs > 0);
5059 req->r_refs--;
5060 lck_mtx_unlock(&nmp->nm_lock);
5061 break;
5062 }
5063 lck_mtx_unlock(&nmp->nm_lock);
5064 }
5065 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5066 break;
5067 }
5068 msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
5069 }
5070 lck_mtx_unlock(&req->r_mtx);
5071
5072 if (!error) {
5073 nfs_request_wait(req);
5074 error = nfs_request_finish(req, nmrepp, status);
5075 }
5076
5077 while (!error && (req->r_flags & R_RESTART)) {
5078 if (asyncio) {
5079 assert(req->r_achain.tqe_next == NFSREQNOLIST);
5080 lck_mtx_lock(&req->r_mtx);
5081 req->r_flags &= ~R_IOD;
5082 if (req->r_resendtime) { /* send later */
5083 nfs_asyncio_resend(req);
5084 lck_mtx_unlock(&req->r_mtx);
5085 return EINPROGRESS;
5086 }
5087 lck_mtx_unlock(&req->r_mtx);
5088 }
5089 req->r_error = 0;
5090 req->r_flags &= ~R_RESTART;
5091 if ((error = nfs_request_add_header(req))) {
5092 break;
5093 }
5094 if ((error = nfs_request_send(req, !asyncio))) {
5095 break;
5096 }
5097 if (asyncio) {
5098 return EINPROGRESS;
5099 }
5100 nfs_request_wait(req);
5101 if ((error = nfs_request_finish(req, nmrepp, status))) {
5102 break;
5103 }
5104 }
5105 if (xidp) {
5106 *xidp = req->r_xid;
5107 }
5108
5109 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
5110 nfs_request_rele(req);
5111 return error;
5112 }
5113
5114 /*
5115 * Cancel a pending asynchronous NFS request.
5116 */
5117 void
5118 nfs_request_async_cancel(struct nfsreq *req)
5119 {
5120 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
5121 nfs_request_rele(req);
5122 }
5123
5124 /*
5125 * Flag a request as being terminated.
5126 */
5127 void
5128 nfs_softterm(struct nfsreq *req)
5129 {
5130 struct nfsmount *nmp = req->r_nmp;
5131 req->r_flags |= R_SOFTTERM;
5132 req->r_error = ETIMEDOUT;
5133 if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
5134 return;
5135 }
5136 /* update congestion window */
5137 req->r_flags &= ~R_CWND;
5138 lck_mtx_lock(&nmp->nm_lock);
5139 FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
5140 nmp->nm_sent -= NFS_CWNDSCALE;
5141 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
5142 /* congestion window is open, poke the cwnd queue */
5143 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
5144 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
5145 req2->r_cchain.tqe_next = NFSREQNOLIST;
5146 wakeup(req2);
5147 }
5148 lck_mtx_unlock(&nmp->nm_lock);
5149 }
5150
5151 /*
5152 * Ensure req isn't in use by the timer, then dequeue it.
5153 */
5154 void
5155 nfs_reqdequeue(struct nfsreq *req)
5156 {
5157 lck_mtx_lock(nfs_request_mutex);
5158 while (req->r_lflags & RL_BUSY) {
5159 req->r_lflags |= RL_WAITING;
5160 msleep(&req->r_lflags, nfs_request_mutex, PSOCK, "reqdeq", NULL);
5161 }
5162 if (req->r_lflags & RL_QUEUED) {
5163 TAILQ_REMOVE(&nfs_reqq, req, r_chain);
5164 req->r_lflags &= ~RL_QUEUED;
5165 }
5166 lck_mtx_unlock(nfs_request_mutex);
5167 }
5168
5169 /*
5170 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5171 * free()'d out from under it.
5172 */
5173 void
5174 nfs_reqbusy(struct nfsreq *req)
5175 {
5176 if (req->r_lflags & RL_BUSY) {
5177 panic("req locked");
5178 }
5179 req->r_lflags |= RL_BUSY;
5180 }
5181
5182 /*
5183 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5184 */
5185 struct nfsreq *
5186 nfs_reqnext(struct nfsreq *req)
5187 {
5188 struct nfsreq * nextreq;
5189
5190 if (req == NULL) {
5191 return NULL;
5192 }
5193 /*
5194 * We need to get and busy the next req before signalling the
5195 * current one, otherwise wakeup() may block us and we'll race to
5196 * grab the next req.
5197 */
5198 nextreq = TAILQ_NEXT(req, r_chain);
5199 if (nextreq != NULL) {
5200 nfs_reqbusy(nextreq);
5201 }
5202 /* unbusy and signal. */
5203 req->r_lflags &= ~RL_BUSY;
5204 if (req->r_lflags & RL_WAITING) {
5205 req->r_lflags &= ~RL_WAITING;
5206 wakeup(&req->r_lflags);
5207 }
5208 return nextreq;
5209 }
5210
5211 /*
5212 * NFS request queue timer routine
5213 *
5214 * Scan the NFS request queue for any requests that have timed out.
5215 *
5216 * Alert the system of unresponsive servers.
5217 * Mark expired requests on soft mounts as terminated.
5218 * For UDP, mark/signal requests for retransmission.
5219 */
5220 void
5221 nfs_request_timer(__unused void *param0, __unused void *param1)
5222 {
5223 struct nfsreq *req;
5224 struct nfsmount *nmp;
5225 int timeo, maxtime, finish_asyncio, error;
5226 struct timeval now;
5227 TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
5228 TAILQ_INIT(&nfs_mount_poke_queue);
5229
5230 restart:
5231 lck_mtx_lock(nfs_request_mutex);
5232 req = TAILQ_FIRST(&nfs_reqq);
5233 if (req == NULL) { /* no requests - turn timer off */
5234 nfs_request_timer_on = 0;
5235 lck_mtx_unlock(nfs_request_mutex);
5236 return;
5237 }
5238
5239 nfs_reqbusy(req);
5240
5241 microuptime(&now);
5242 for (; req != NULL; req = nfs_reqnext(req)) {
5243 nmp = req->r_nmp;
5244 if (nmp == NULL) {
5245 NFS_SOCK_DBG("Found a request with out a mount!\n");
5246 continue;
5247 }
5248 if (req->r_error || req->r_nmrep.nmc_mhead) {
5249 continue;
5250 }
5251 if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
5252 if (req->r_callback.rcb_func != NULL) {
5253 /* async I/O RPC needs to be finished */
5254 lck_mtx_lock(&req->r_mtx);
5255 req->r_error = error;
5256 finish_asyncio = !(req->r_flags & R_WAITSENT);
5257 wakeup(req);
5258 lck_mtx_unlock(&req->r_mtx);
5259 if (finish_asyncio) {
5260 nfs_asyncio_finish(req);
5261 }
5262 }
5263 continue;
5264 }
5265
5266 lck_mtx_lock(&req->r_mtx);
5267
5268 if (nmp->nm_tprintf_initial_delay &&
5269 ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
5270 ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
5271 req->r_lastmsg = now.tv_sec;
5272 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5273 "not responding", 1);
5274 req->r_flags |= R_TPRINTFMSG;
5275 lck_mtx_lock(&nmp->nm_lock);
5276 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
5277 lck_mtx_unlock(&nmp->nm_lock);
5278 /* we're not yet completely mounted and */
5279 /* we can't complete an RPC, so we fail */
5280 OSAddAtomic64(1, &nfsstats.rpctimeouts);
5281 nfs_softterm(req);
5282 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5283 wakeup(req);
5284 lck_mtx_unlock(&req->r_mtx);
5285 if (finish_asyncio) {
5286 nfs_asyncio_finish(req);
5287 }
5288 continue;
5289 }
5290 lck_mtx_unlock(&nmp->nm_lock);
5291 }
5292
5293 /*
5294 * Put a reasonable limit on the maximum timeout,
5295 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5296 */
5297 if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
5298 maxtime = NFS_MAXTIMEO;
5299 } else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
5300 ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
5301 maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
5302 } else {
5303 maxtime = NFS_MINTIMEO / 4;
5304 }
5305
5306 /*
5307 * Check for request timeout.
5308 */
5309 if (req->r_rtt >= 0) {
5310 req->r_rtt++;
5311 lck_mtx_lock(&nmp->nm_lock);
5312 if (req->r_flags & R_RESENDERR) {
5313 /* with resend errors, retry every few seconds */
5314 timeo = 4 * hz;
5315 } else {
5316 if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
5317 timeo = NFS_MINIDEMTIMEO; // gss context setup
5318 } else if (NMFLAG(nmp, DUMBTIMER)) {
5319 timeo = nmp->nm_timeo;
5320 } else {
5321 timeo = NFS_RTO(nmp, proct[req->r_procnum]);
5322 }
5323
5324 /* ensure 62.5 ms floor */
5325 while (16 * timeo < hz) {
5326 timeo *= 2;
5327 }
5328 if (nmp->nm_timeouts > 0) {
5329 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
5330 }
5331 }
5332 /* limit timeout to max */
5333 if (timeo > maxtime) {
5334 timeo = maxtime;
5335 }
5336 if (req->r_rtt <= timeo) {
5337 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
5338 lck_mtx_unlock(&nmp->nm_lock);
5339 lck_mtx_unlock(&req->r_mtx);
5340 continue;
5341 }
5342 /* The request has timed out */
5343 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5344 req->r_procnum, proct[req->r_procnum],
5345 req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
5346 (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
5347 if (nmp->nm_timeouts < 8) {
5348 nmp->nm_timeouts++;
5349 }
5350 if (nfs_mount_check_dead_timeout(nmp)) {
5351 /* Unbusy this request */
5352 req->r_lflags &= ~RL_BUSY;
5353 if (req->r_lflags & RL_WAITING) {
5354 req->r_lflags &= ~RL_WAITING;
5355 wakeup(&req->r_lflags);
5356 }
5357 lck_mtx_unlock(&req->r_mtx);
5358
5359 /* No need to poke this mount */
5360 if (nmp->nm_sockflags & NMSOCK_POKE) {
5361 nmp->nm_sockflags &= ~NMSOCK_POKE;
5362 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5363 }
5364 /* Release our lock state, so we can become a zombie */
5365 lck_mtx_unlock(nfs_request_mutex);
5366
5367 /*
5368 * Note nfs_mount_make zombie(nmp) must be
5369 * called with nm_lock held. After doing some
5370 * work we release nm_lock in
5371 * nfs_make_mount_zombie with out acquiring any
5372 * other locks. (Later, in nfs_mount_zombie we
5373 * will acquire nfs_request_mutex, r_mtx,
5374 * nm_lock in that order). So we should not be
5375 * introducing deadlock here. We take a reference
5376 * on the mount so that its still there when we
5377 * release the lock.
5378 */
5379 nmp->nm_ref++;
5380 nfs_mount_make_zombie(nmp);
5381 lck_mtx_unlock(&nmp->nm_lock);
5382 nfs_mount_rele(nmp);
5383
5384 /*
5385 * All the request for this mount have now been
5386 * removed from the request queue. Restart to
5387 * process the remaining mounts
5388 */
5389 goto restart;
5390 }
5391
5392 /* if it's been a few seconds, try poking the socket */
5393 if ((nmp->nm_sotype == SOCK_STREAM) &&
5394 ((now.tv_sec - req->r_start) >= 3) &&
5395 !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
5396 (nmp->nm_sockflags & NMSOCK_READY)) {
5397 nmp->nm_sockflags |= NMSOCK_POKE;
5398 /*
5399 * We take a ref on the mount so that we know the mount will still be there
5400 * when we process the nfs_mount_poke_queue. An unmount request will block
5401 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5402 * the reference after calling nfs_sock_poke below;
5403 */
5404 nmp->nm_ref++;
5405 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
5406 }
5407 lck_mtx_unlock(&nmp->nm_lock);
5408 }
5409
5410 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5411 if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
5412 ((req->r_rexmit >= req->r_retry) || /* too many */
5413 ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
5414 OSAddAtomic64(1, &nfsstats.rpctimeouts);
5415 lck_mtx_lock(&nmp->nm_lock);
5416 if (!(nmp->nm_state & NFSSTA_TIMEO)) {
5417 lck_mtx_unlock(&nmp->nm_lock);
5418 /* make sure we note the unresponsive server */
5419 /* (maxtime may be less than tprintf delay) */
5420 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5421 "not responding", 1);
5422 req->r_lastmsg = now.tv_sec;
5423 req->r_flags |= R_TPRINTFMSG;
5424 } else {
5425 lck_mtx_unlock(&nmp->nm_lock);
5426 }
5427 if (req->r_flags & R_NOINTR) {
5428 /* don't terminate nointr requests on timeout */
5429 lck_mtx_unlock(&req->r_mtx);
5430 continue;
5431 }
5432 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5433 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
5434 now.tv_sec - req->r_start);
5435 nfs_softterm(req);
5436 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5437 wakeup(req);
5438 lck_mtx_unlock(&req->r_mtx);
5439 if (finish_asyncio) {
5440 nfs_asyncio_finish(req);
5441 }
5442 continue;
5443 }
5444
5445 /* for TCP, only resend if explicitly requested */
5446 if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
5447 if (++req->r_rexmit > NFS_MAXREXMIT) {
5448 req->r_rexmit = NFS_MAXREXMIT;
5449 }
5450 req->r_rtt = 0;
5451 lck_mtx_unlock(&req->r_mtx);
5452 continue;
5453 }
5454
5455 /*
5456 * The request needs to be (re)sent. Kick the requester to resend it.
5457 * (unless it's already marked as needing a resend)
5458 */
5459 if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
5460 lck_mtx_unlock(&req->r_mtx);
5461 continue;
5462 }
5463 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5464 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
5465 req->r_flags |= R_MUSTRESEND;
5466 req->r_rtt = -1;
5467 wakeup(req);
5468 if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
5469 nfs_asyncio_resend(req);
5470 }
5471 lck_mtx_unlock(&req->r_mtx);
5472 }
5473
5474 lck_mtx_unlock(nfs_request_mutex);
5475
5476 /* poke any sockets */
5477 while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
5478 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5479 nfs_sock_poke(nmp);
5480 nfs_mount_rele(nmp);
5481 }
5482
5483 nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
5484 }
5485
5486 /*
5487 * check a thread's proc for the "noremotehang" flag.
5488 */
5489 int
5490 nfs_noremotehang(thread_t thd)
5491 {
5492 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
5493 return p && proc_noremotehang(p);
5494 }
5495
5496 /*
5497 * Test for a termination condition pending on the process.
5498 * This is used to determine if we need to bail on a mount.
5499 * ETIMEDOUT is returned if there has been a soft timeout.
5500 * EINTR is returned if there is a signal pending that is not being ignored
5501 * and the mount is interruptable, or if we are a thread that is in the process
5502 * of cancellation (also SIGKILL posted).
5503 */
5504 extern int sigprop[NSIG + 1];
5505 int
5506 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
5507 {
5508 proc_t p;
5509 int error = 0;
5510
5511 if (!nmp) {
5512 return ENXIO;
5513 }
5514
5515 if (req && (req->r_flags & R_SOFTTERM)) {
5516 return ETIMEDOUT; /* request has been terminated. */
5517 }
5518 if (req && (req->r_flags & R_NOINTR)) {
5519 thd = NULL; /* don't check for signal on R_NOINTR */
5520 }
5521 if (!nmplocked) {
5522 lck_mtx_lock(&nmp->nm_lock);
5523 }
5524 if (nmp->nm_state & NFSSTA_FORCE) {
5525 /* If a force unmount is in progress then fail. */
5526 error = EIO;
5527 } else if (vfs_isforce(nmp->nm_mountp)) {
5528 /* Someone is unmounting us, go soft and mark it. */
5529 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
5530 nmp->nm_state |= NFSSTA_FORCE;
5531 }
5532
5533 /* Check if the mount is marked dead. */
5534 if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
5535 error = ENXIO;
5536 }
5537
5538 /*
5539 * If the mount is hung and we've requested not to hang
5540 * on remote filesystems, then bail now.
5541 */
5542 if (current_proc() != kernproc &&
5543 !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
5544 error = EIO;
5545 }
5546
5547 if (!nmplocked) {
5548 lck_mtx_unlock(&nmp->nm_lock);
5549 }
5550 if (error) {
5551 return error;
5552 }
5553
5554 /* may not have a thread for async I/O */
5555 if (thd == NULL || current_proc() == kernproc) {
5556 return 0;
5557 }
5558
5559 /*
5560 * Check if the process is aborted, but don't interrupt if we
5561 * were killed by a signal and this is the exiting thread which
5562 * is attempting to dump core.
5563 */
5564 if (((p = current_proc()) != kernproc) && current_thread_aborted() &&
5565 (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) ||
5566 (p->p_sigacts == NULL) ||
5567 (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) ||
5568 !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) {
5569 return EINTR;
5570 }
5571
5572 /* mask off thread and process blocked signals. */
5573 if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
5574 proc_pendingsignals(p, NFSINT_SIGMASK)) {
5575 return EINTR;
5576 }
5577 return 0;
5578 }
5579
5580 /*
5581 * Lock a socket against others.
5582 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5583 * and also to avoid race conditions between the processes with nfs requests
5584 * in progress when a reconnect is necessary.
5585 */
5586 int
5587 nfs_sndlock(struct nfsreq *req)
5588 {
5589 struct nfsmount *nmp = req->r_nmp;
5590 int *statep;
5591 int error = 0, slpflag = 0;
5592 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
5593
5594 if (nfs_mount_gone(nmp)) {
5595 return ENXIO;
5596 }
5597
5598 lck_mtx_lock(&nmp->nm_lock);
5599 statep = &nmp->nm_state;
5600
5601 if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
5602 slpflag = PCATCH;
5603 }
5604 while (*statep & NFSSTA_SNDLOCK) {
5605 if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
5606 break;
5607 }
5608 *statep |= NFSSTA_WANTSND;
5609 if (nfs_noremotehang(req->r_thread)) {
5610 ts.tv_sec = 1;
5611 }
5612 msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
5613 if (slpflag == PCATCH) {
5614 slpflag = 0;
5615 ts.tv_sec = 2;
5616 }
5617 }
5618 if (!error) {
5619 *statep |= NFSSTA_SNDLOCK;
5620 }
5621 lck_mtx_unlock(&nmp->nm_lock);
5622 return error;
5623 }
5624
5625 /*
5626 * Unlock the stream socket for others.
5627 */
5628 void
5629 nfs_sndunlock(struct nfsreq *req)
5630 {
5631 struct nfsmount *nmp = req->r_nmp;
5632 int *statep, wake = 0;
5633
5634 if (!nmp) {
5635 return;
5636 }
5637 lck_mtx_lock(&nmp->nm_lock);
5638 statep = &nmp->nm_state;
5639 if ((*statep & NFSSTA_SNDLOCK) == 0) {
5640 panic("nfs sndunlock");
5641 }
5642 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
5643 if (*statep & NFSSTA_WANTSND) {
5644 *statep &= ~NFSSTA_WANTSND;
5645 wake = 1;
5646 }
5647 lck_mtx_unlock(&nmp->nm_lock);
5648 if (wake) {
5649 wakeup(statep);
5650 }
5651 }
5652
/*
 * Send a single RPC over an auxiliary socket (e.g. portmap/rpcbind
 * queries) and wait for the matching reply.
 *
 * If so is NULL, a temporary socket of the given type is created
 * (optionally bound to a reserved port) and torn down on exit;
 * otherwise the caller's socket is used with its timeouts temporarily
 * set to one second and restored afterward.  UDP requests are resent
 * with increasing intervals; TCP requests are sent once.  On success
 * the parsed reply body is left in nmrep positioned after the RPC
 * accept header.  Consumes mreq.  timeo is roughly the number of
 * one-second receive attempts.
 */
int
nfs_aux_request(
	struct nfsmount *nmp,
	thread_t thd,
	struct sockaddr *saddr,
	socket_t so,
	int sotype,
	mbuf_t mreq,
	uint32_t xid,
	int bindresv,
	int timeo,
	struct nfsm_chain *nmrep)
{
	int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
	socket_t newso = NULL;
	struct sockaddr_storage ss;
	struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
	mbuf_t m, mrep = NULL;
	struct msghdr msg;
	uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
	uint32_t verf_type, verf_len, accepted_status;
	size_t readlen, sentlen;
	struct nfs_rpc_record_state nrrs;

	if (!so) {
		/* create socket and set options */
		if (saddr->sa_family == AF_LOCAL) {
			soproto = 0;
		} else {
			soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
		}
		if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
			goto nfsmout;
		}

		if (bindresv && saddr->sa_family != AF_LOCAL) {
			/* bind to a low (reserved) local port, any address */
			int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
			int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
			int portrange = IP_PORTRANGE_LOW;
			error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
			nfsmout_if(error);
			ss.ss_len = saddr->sa_len;
			ss.ss_family = saddr->sa_family;
			if (ss.ss_family == AF_INET) {
				((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
				((struct sockaddr_in*)&ss)->sin_port = htons(0);
			} else if (ss.ss_family == AF_INET6) {
				((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
				((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
			} else {
				error = EINVAL;
			}
			if (!error) {
				error = sock_bind(newso, (struct sockaddr *)&ss);
			}
			nfsmout_if(error);
		}

		if (sotype == SOCK_STREAM) {
#                       define NFS_AUX_CONNECTION_TIMEOUT 4   /* 4 second timeout for connections */
			int count = 0;

			error = sock_connect(newso, saddr, MSG_DONTWAIT);
			if (error == EINPROGRESS) {
				error = 0;
			}
			nfsmout_if(error);

			while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
				/* After NFS_AUX_CONNECTION_TIMEOUT bail */
				if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
					error = ETIMEDOUT;
					break;
				}
			}
			nfsmout_if(error);
		}
		if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
		    ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
			goto nfsmout;
		}
		so = newso;
	} else {
		/* make sure socket is using a one second timeout in this function */
		optlen = sizeof(orig_rcvto);
		error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
		if (!error) {
			optlen = sizeof(orig_sndto);
			error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
		}
		if (!error) {
			sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
			sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
			restoreto = 1;
		}
	}

	if (sotype == SOCK_STREAM) {
		sendat = 0; /* we only resend the request for UDP */
		nfs_rpc_record_state_init(&nrrs);
	}

	for (try = 0; try < timeo; try++) {
		/* don't check for signals on the very first pass */
		if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
			break;
		}
		if (!try || (try == sendat)) {
			/* send the request (resending periodically for UDP) */
			if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
				goto nfsmout;
			}
			bzero(&msg, sizeof(msg));
			if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
				msg.msg_name = saddr;
				msg.msg_namelen = saddr->sa_len;
			}
			if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
				goto nfsmout;
			}
			/* back off the resend interval, capped at 30 */
			sendat *= 2;
			if (sendat > 30) {
				sendat = 30;
			}
		}
		/* wait for the response */
		if (sotype == SOCK_STREAM) {
			/* try to read (more of) record */
			error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
			/* if we don't have the whole record yet, we'll keep trying */
		} else {
			readlen = 1 << 18;
			bzero(&msg, sizeof(msg));
			error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
		}
		if (error == EWOULDBLOCK) {
			continue;
		}
		nfsmout_if(error);
		/* parse the response */
		nfsm_chain_dissect_init(error, nmrep, mrep);
		nfsm_chain_get_32(error, nmrep, rxid);
		nfsm_chain_get_32(error, nmrep, reply);
		nfsmout_if(error);
		if ((rxid != xid) || (reply != RPC_REPLY)) {
			error = EBADRPC;
		}
		nfsm_chain_get_32(error, nmrep, reply_status);
		nfsmout_if(error);
		if (reply_status == RPC_MSGDENIED) {
			nfsm_chain_get_32(error, nmrep, rejected_status);
			nfsmout_if(error);
			error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
		nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
		nfsmout_if(error);
		if (verf_len) {
			nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
		}
		nfsm_chain_get_32(error, nmrep, accepted_status);
		nfsmout_if(error);
		/* map RPC accept status to errno */
		switch (accepted_status) {
		case RPC_SUCCESS:
			error = 0;
			break;
		case RPC_PROGUNAVAIL:
			error = EPROGUNAVAIL;
			break;
		case RPC_PROGMISMATCH:
			error = EPROGMISMATCH;
			break;
		case RPC_PROCUNAVAIL:
			error = EPROCUNAVAIL;
			break;
		case RPC_GARBAGE:
			error = EBADRPC;
			break;
		case RPC_SYSTEM_ERR:
		default:
			error = EIO;
			break;
		}
		break;
	}
nfsmout:
	if (restoreto) {
		/* put the caller's socket timeouts back */
		sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(tv));
		sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(tv));
	}
	if (newso) {
		sock_shutdown(newso, SHUT_RDWR);
		sock_close(newso);
	}
	mbuf_freem(mreq);
	return error;
}
5851
/*
 * Look up the port/address for an RPC (program, version) on a server
 * via the portmapper (IPv4) or rpcbind (IPv6 / AF_LOCAL).
 *
 * On success, sa is updated in place with the port (IPv4) or the
 * address decoded from the returned universal-address string.  For
 * rpcbind, falls back from RPCBVERS4/GETVERSADDR to RPCBVERS3/GETADDR
 * if the server doesn't support the newer protocol.  A zero port /
 * empty path in sa indicates the program is registered but the service
 * is unavailable.
 */
int
nfs_portmap_lookup(
	struct nfsmount *nmp,
	vfs_context_t ctx,
	struct sockaddr *sa,
	socket_t so,
	uint32_t protocol,
	uint32_t vers,
	uint32_t stype,
	int timeo)
{
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	struct sockaddr_storage ss;
	struct sockaddr *saddr = (struct sockaddr*)&ss;
	static struct sockaddr_un rpcbind_cots = {
		sizeof(struct sockaddr_un),
		AF_LOCAL,
		RPCB_TICOTSORD_PATH
	};
	static struct sockaddr_un rpcbind_clts = {
		sizeof(struct sockaddr_un),
		AF_LOCAL,
		RPCB_TICLTS_PATH
	};
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mreq;
	int error = 0, ip, pmprog, pmvers, pmproc;
	uint32_t ualen = 0;
	uint32_t port;
	uint64_t xid = 0;
	char uaddr[MAX_IPv6_STR_LEN + 16];

	/* work on a local copy of the server address */
	bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
	if (saddr->sa_family == AF_INET) {
		ip = 4;
		pmprog = PMAPPROG;
		pmvers = PMAPVERS;
		pmproc = PMAPPROC_GETPORT;
	} else if (saddr->sa_family == AF_INET6) {
		ip = 6;
		pmprog = RPCBPROG;
		pmvers = RPCBVERS4;
		pmproc = RPCBPROC_GETVERSADDR;
	} else if (saddr->sa_family == AF_LOCAL) {
		ip = 0;
		pmprog = RPCBPROG;
		pmvers = RPCBVERS4;
		pmproc = RPCBPROC_GETVERSADDR;
		NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
		/* query the local rpcbind over its well-known socket */
		saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
	} else {
		return EINVAL;
	}
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

tryagain:
	/* send portmapper request to get port/uaddr */
	if (ip == 4) {
		((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
	} else if (ip == 6) {
		((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
	}
	nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
	nfsm_chain_add_32(error, &nmreq, protocol);
	nfsm_chain_add_32(error, &nmreq, vers);
	if (ip == 4) {
		nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
		nfsm_chain_add_32(error, &nmreq, 0);
	} else {
		/* rpcbind wants a netid string instead of a protocol number */
		if (stype == SOCK_STREAM) {
			if (ip == 6) {
				nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
			} else {
				nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
			}
		} else {
			if (ip == 6) {
				nfsm_chain_add_string(error, &nmreq, "udp6", 4);
			} else {
				nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
			}
		}
		nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
		nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
	}
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
	    RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
	nfsmout_if(error);
	nmreq.nmc_mhead = NULL;

	NFS_SOCK_DUMP_MBUF("nfs_portmap_loockup request", mreq);
	error = nfs_aux_request(nmp, thd, saddr, so,
	    stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
	NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
	NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers,
	    (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
	    (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket");

	/* grab port from portmap response */
	if (ip == 4) {
		nfsm_chain_get_32(error, &nmrep, port);
		if (!error) {
			((struct sockaddr_in*)sa)->sin_port = htons(port);
		}
	} else {
		/* get uaddr string and convert to sockaddr */
		nfsm_chain_get_32(error, &nmrep, ualen);
		if (!error) {
			if (ualen > (sizeof(uaddr) - 1)) {
				error = EIO;
			}
			if (ualen < 1) {
				/* program is not available, just return a zero port */
				bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
				if (ip == 6) {
					((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
				} else {
					((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
				}
				NFS_SOCK_DBG("Program %u version %u unavailable", protocol, vers);
			} else {
				/* nfsm_chain_get_opaque is a no-op if error is already set */
				nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
				NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
				if (!error) {
					uaddr[ualen] = '\0';
					if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
						error = EIO;
					}
				}
			}
		}
		if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
			/* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
			if (pmvers == RPCBVERS4) {
				/* fall back to v3 and GETADDR */
				pmvers = RPCBVERS3;
				pmproc = RPCBPROC_GETADDR;
				nfsm_chain_cleanup(&nmreq);
				nfsm_chain_cleanup(&nmrep);
				bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
				xid = 0;
				error = 0;
				goto tryagain;
			}
		}
		if (!error) {
			bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
		}
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	NFS_SOCK_DBG("Returned %d\n", error);

	return error;
}
6012
6013 int
6014 nfs_msg(thread_t thd,
6015 const char *server,
6016 const char *msg,
6017 int error)
6018 {
6019 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6020 tpr_t tpr;
6021
6022 if (p) {
6023 tpr = tprintf_open(p);
6024 } else {
6025 tpr = NULL;
6026 }
6027 if (error) {
6028 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
6029 } else {
6030 tprintf(tpr, "nfs server %s: %s\n", server, msg);
6031 }
6032 tprintf_close(tpr);
6033 return 0;
6034 }
6035
6036 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
#define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if they are automounted */
6038 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
6039 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
6040 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
6041
6042 uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
6043 int32_t nfs_is_mobile;
6044
6045 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */
#define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squishy_flags NFS_SQUISH_QUICK bit is set */
6047
6048 /*
6049 * Could this mount be squished?
6050 */
6051 int
6052 nfs_can_squish(struct nfsmount *nmp)
6053 {
6054 uint64_t flags = vfs_flags(nmp->nm_mountp);
6055 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT));
6056
6057 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6058 return 0;
6059 }
6060
6061 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6062 return 0;
6063 }
6064
6065 return 1;
6066 }
6067
6068 /*
6069 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6070 * the mount may become "not responding". It's desirable to be able
6071 * to unmount these dead mounts, but only if there is no risk of
6072 * losing data or crashing applications. A "squishy" NFS mount is one
6073 * that can be force unmounted with little risk of harm.
6074 *
6075 * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6076 * in a squishy state iff it is allowed to be squishy and there are no
6077 * dirty pages and there are no mmapped files and there are no files
6078 * open for write. Mounts are allowed to be squishy is controlled by
6079 * the settings of the nfs_squishy_flags and its mobility state. These
6080 * flags can be set by sysctls.
6081 *
6082 * If nfs_is_squishy determines that we are in a squishy state we will
6083 * update the current dead timeout to at least NFS_SQUISHY_DEADTIMEOUT
6084 * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set) (see
6085 * above) or 1/8th of the mount's nm_deadtimeout value, otherwise we just
6086 * update the current dead timeout with the mount's nm_deadtimeout
6087 * value set at mount time.
6088 *
6089 * Assumes that nm_lock is held.
6090 *
6091 * Note this routine is racey, but its effects on setting the
6092 * dead timeout only have effects when we're in trouble and are likely
6093 * to stay that way. Since by default its only for automounted
6094 * volumes on mobile machines; this is a reasonable trade off between
6095 * data integrity and user experience. It can be disabled or set via
6096 * nfs.conf file.
6097 */
6098
int
nfs_is_squishy(struct nfsmount *nmp)
{
	mount_t mp = nmp->nm_mountp;
	int squishy = 0;
	/* Base dead timeout: shorter when the NFS_SQUISH_QUICK policy bit is set. */
	int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;

	NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
	    vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);

	/* Not eligible for squishing at all: fall through and reset state below. */
	if (!nfs_can_squish(nmp)) {
		goto out;
	}

	/* Use at least 1/8th of the mount's own dead timeout, if that is larger. */
	timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
	NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);

	/* Squishy only if there are no files open for write and no mmapped files. */
	if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
		uint64_t flags = mp ? vfs_flags(mp) : 0;
		squishy = 1;

		/*
		 * Walk the nfs nodes and check for dirty buffers if we're not
		 * RDONLY and we've not already been declared as squishy since
		 * this can be a bit expensive.
		 */
		if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
			squishy = !nfs_mount_is_dirty(mp);
		}
	}

out:
	/* Record the squishy state and set the effective dead timeout accordingly. */
	if (squishy) {
		nmp->nm_state |= NFSSTA_SQUISHY;
	} else {
		nmp->nm_state &= ~NFSSTA_SQUISHY;
	}

	nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;

	NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);

	return squishy;
}
6143
/*
 * On a send operation, if we can't reach the server and we've got only one server to talk to
 * and NFS_SQUISH_QUICK flag is set and we are in a squishy state then mark the mount as dead
 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
 */
int
nfs_is_dead(int error, struct nfsmount *nmp)
{
	fsid_t fsid;

	lck_mtx_lock(&nmp->nm_lock);
	/* Already declared dead: nothing more to do. */
	if (nmp->nm_state & NFSSTA_DEAD) {
		lck_mtx_unlock(&nmp->nm_lock);
		return 1;
	}

	/*
	 * Only consider death for unreachable-network errors, and only when
	 * there is exactly one location with exactly one server (i.e. no
	 * alternate server we could fail over to).
	 */
	if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
	    !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
		lck_mtx_unlock(&nmp->nm_lock);
		return 0;
	}

	if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
		printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
		/* Copy the fsid before dropping the lock so we can still signal VQ_DEAD. */
		fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
		lck_mtx_unlock(&nmp->nm_lock);
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
		vfs_event_signal(&fsid, VQ_DEAD, 0);
		return 1;
	}
	lck_mtx_unlock(&nmp->nm_lock);
	return 0;
}
6177
6178 /*
6179 * If we've experienced timeouts and we're not really a
6180 * classic hard mount, then just return cached data to
6181 * the caller instead of likely hanging on an RPC.
6182 */
6183 int
6184 nfs_use_cache(struct nfsmount *nmp)
6185 {
6186 /*
6187 *%%% We always let mobile users goto the cache,
6188 * perhaps we should not even require them to have
6189 * a timeout?
6190 */
6191 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
6192 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
6193
6194 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6195
6196 /*
6197 * So if we have a timeout and we're not really a hard hard-mount,
6198 * return 1 to not get things out of the cache.
6199 */
6200
6201 return (nmp->nm_state & timeoutmask) && cache_ok;
6202 }
6203
/*
 * Log a message that nfs or lockd server is unresponsive. Check if we
 * can be squished and if we can, or that our dead timeout has
 * expired, and we're not holding state, set our mount as dead, remove
 * our mount state and ask to be unmounted. If we are holding state
 * we're being called from the nfs_request_timer and will soon detect
 * that we need to unmount.
 */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	uint32_t do_vfs_signal = 0;
	struct timeval now;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* Remember whether we were already unresponsive before this report. */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* Record whichever timeout condition(s) the caller is reporting. */
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* Refresh the squishy state / effective dead timeout. */
	nfs_is_squishy(nmp);

	if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
		microuptime(&now);
		if (!wasunresponsive) {
			/* Transition into unresponsive: start the dead-mount countdown. */
			nmp->nm_deadto_start = now.tv_sec;
			nfs_mount_sock_thread_wake(nmp);
		} else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
			/* Countdown expired and we hold no state: declare the mount dead. */
			if (!(nmp->nm_state & NFSSTA_DEAD)) {
				printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
				    (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
			}
			do_vfs_signal = VQ_DEAD;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
		nfs_mount_zombie(nmp, NFSSTA_DEAD);
	} else if (softnobrowse || wasunresponsive || !unresponsive) {
		/* Signal VQ_NOTRESP only on a fresh transition into unresponsive. */
		do_vfs_signal = 0;
	} else {
		do_vfs_signal = VQ_NOTRESP;
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
	}

	nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
}
6276
/*
 * The server has become responsive again: log the (optional) message,
 * clear the reported timeout flags, reset the dead-mount countdown and
 * squishy state, and signal VQ_NOTRESP (gone) if we just transitioned
 * from unresponsive back to responsive.
 */
void
nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
{
	int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
	int do_vfs_signal;

	if (nfs_mount_gone(nmp)) {
		return;
	}

	if (msg) {
		nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
	}

	lck_mtx_lock(&nmp->nm_lock);

	timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
	if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
		timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
	}
	/* Remember whether we were unresponsive before clearing the flags. */
	wasunresponsive = (nmp->nm_state & timeoutmask);

	/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
	softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));

	/* Clear whichever timeout condition(s) the caller is reporting recovery for. */
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
	}
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
	}
	if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
		nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
	}

	unresponsive = (nmp->nm_state & timeoutmask);

	/* Reset the dead-mount countdown and squishy state. */
	nmp->nm_deadto_start = 0;
	nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
	nmp->nm_state &= ~NFSSTA_SQUISHY;
	lck_mtx_unlock(&nmp->nm_lock);

	if (softnobrowse) {
		do_vfs_signal = 0;
	} else {
		/* Signal only on a transition from unresponsive to responsive. */
		do_vfs_signal = (wasunresponsive && !unresponsive);
	}
	if (do_vfs_signal) {
		vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
	}
}
6328
6329
6330 #endif /* CONFIG_NFS_CLIENT */
6331
6332 #if CONFIG_NFS_SERVER
6333
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 *
 * On success returns 0 and hands the newly-built reply chain back in
 * *nmrepp; on failure returns an errno and frees the partial mbuf.
 * nd->nd_repstat supplies the (NFS-level) status being reported.
 */
int
nfsrv_rephead(
	struct nfsrv_descript *nd,
	__unused struct nfsrv_sock *slp,
	struct nfsm_chain *nmrepp,
	size_t siz)
{
	mbuf_t mrep;
	u_int32_t *tl;
	struct nfsm_chain nmrep;
	int err, error;

	err = nd->nd_repstat;
	/* NFSv2 error replies carry no body beyond the status. */
	if (err && (nd->nd_vers == NFS_VER2)) {
		siz = 0;
	}

	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= nfs_mbuf_minclsize) {
		error = mbuf_getpacket(MBUF_WAITOK, &mrep);
	} else {
		error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
	}
	if (error) {
		/* unable to allocate packet */
		/* XXX should we keep statistics for these errors? */
		return error;
	}
	if (siz < nfs_mbuf_minclsize) {
		/* leave space for lower level headers */
		tl = mbuf_data(mrep);
		tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
		mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
	}
	/* Build the RPC reply header (RFC 5531 reply body layout). */
	nfsm_chain_init(&nmrep, mrep);
	nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
	nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		/* Rejected reply: either auth failure or RPC version mismatch. */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
		if (err & NFSERR_AUTHERR) {
			nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
			nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
		} else {
			nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
			/* low and high supported RPC versions (both 2) */
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
			nfsm_chain_add_32(error, &nmrep, RPC_VER2);
		}
	} else {
		/* reply status */
		nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
		if (nd->nd_gss_context != NULL) {
			/* RPCSEC_GSS verifier */
			error = nfs_gss_svc_verf_put(nd, &nmrep);
			if (error) {
				nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
				goto done;
			}
		} else {
			/* RPCAUTH_NULL verifier */
			nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
			nfsm_chain_add_32(error, &nmrep, 0);
		}
		/* accepted status */
		switch (err) {
		case EPROGUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
			/* XXX hard coded versions? */
			nfsm_chain_add_32(error, &nmrep, NFS_VER2);
			nfsm_chain_add_32(error, &nmrep, NFS_VER3);
			break;
		case EPROCUNAVAIL:
			nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
			break;
		default:
			nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
			if (nd->nd_gss_context != NULL) {
				error = nfs_gss_svc_prepare_reply(nd, &nmrep);
			}
			/* NFSERR_RETVOID replies carry no status word at all. */
			if (err != NFSERR_RETVOID) {
				nfsm_chain_add_32(error, &nmrep,
				    (err ? nfsrv_errmap(nd, err) : 0));
			}
			break;
		}
	}

done:
	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		/* error composing reply header */
		/* XXX should we keep statistics for these errors? */
		mbuf_freem(mrep);
		return error;
	}

	*nmrepp = nmrep;
	if ((err != 0) && (err != NFSERR_RETVOID)) {
		OSAddAtomic64(1, &nfsstats.srvrpc_errs);
	}
	return 0;
}
6449
6450 /*
6451 * The nfs server send routine.
6452 *
6453 * - return EINTR or ERESTART if interrupted by a signal
6454 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6455 * - do any cleanup required by recoverable socket errors (???)
6456 */
6457 int
6458 nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
6459 {
6460 int error;
6461 socket_t so = slp->ns_so;
6462 struct sockaddr *sendnam;
6463 struct msghdr msg;
6464
6465 bzero(&msg, sizeof(msg));
6466 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6467 if ((sendnam = mbuf_data(nam))) {
6468 msg.msg_name = (caddr_t)sendnam;
6469 msg.msg_namelen = sendnam->sa_len;
6470 }
6471 }
6472 if (NFS_IS_DBG(NFS_FAC_SRV, 15)) {
6473 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6474 }
6475 error = sock_sendmbuf(so, &msg, top, 0, NULL);
6476 if (!error) {
6477 return 0;
6478 }
6479 log(LOG_INFO, "nfsd send error %d\n", error);
6480
6481 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
6482 error = EPIPE; /* zap TCP sockets if they time out on send */
6483 }
6484 /* Handle any recoverable (soft) socket errors here. (???) */
6485 if (error != EINTR && error != ERESTART && error != EIO &&
6486 error != EWOULDBLOCK && error != EPIPE) {
6487 error = 0;
6488 }
6489
6490 return error;
6491 }
6492
6493 /*
6494 * Socket upcall routine for the nfsd sockets.
6495 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6496 * Essentially do as much as possible non-blocking, else punt and it will
6497 * be called with MBUF_WAITOK from an nfsd.
6498 */
6499 void
6500 nfsrv_rcv(socket_t so, void *arg, int waitflag)
6501 {
6502 struct nfsrv_sock *slp = arg;
6503
6504 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
6505 return;
6506 }
6507
6508 lck_rw_lock_exclusive(&slp->ns_rwlock);
6509 nfsrv_rcv_locked(so, slp, waitflag);
6510 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6511 }
/*
 * Receive-side work for an nfsd socket; entered with ns_rwlock held
 * exclusive (see nfsrv_rcv above).  Pulls data off the socket, queues
 * complete RPC records on ns_rec, and — when called with MBUF_DONTWAIT —
 * drops ns_rwlock and wakes an nfsd if there is work to do.
 */
void
nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m, mp, mhck, m2;
	int ns_flag = 0, error;
	struct msghdr msg;
	size_t bytes_read;

	if ((slp->ns_flag & SLP_VALID) == 0) {
		/* invalid socket: honor the lock-drop contract and bail */
		if (waitflag == MBUF_DONTWAIT) {
			lck_rw_done(&slp->ns_rwlock);
		}
		return;
	}

#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == MBUF_DONTWAIT) {
		ns_flag = SLP_NEEDQ;
		goto dorecs;
	}
#endif
	if (slp->ns_sotype == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an(other) nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec) {
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		bytes_read = 1000000000;
		error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
		if (error || mp == NULL) {
			/* EWOULDBLOCK means "queue for an nfsd"; anything else means disconnect. */
			if (error == EWOULDBLOCK) {
				ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
			} else {
				ns_flag = SLP_DISCONN;
			}
			goto dorecs;
		}
		m = mp;
		/* Append the new data to the raw stream chain (ns_raw/ns_rawend). */
		if (slp->ns_rawend) {
			if ((error = mbuf_setnext(slp->ns_rawend, m))) {
				panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
			}
			slp->ns_cc += bytes_read;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = bytes_read;
		}
		/* Walk to the new tail of the raw chain. */
		while ((m2 = mbuf_next(m))) {
			m = m2;
		}
		slp->ns_rawend = m;

		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag);
		if (error) {
			/* EPERM signals a bogus record mark (see nfsrv_getstream). */
			if (error == EPERM) {
				ns_flag = SLP_DISCONN;
			} else {
				ns_flag = SLP_NEEDQ;
			}
		}
	} else {
		struct sockaddr_storage nam;

		if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
			/* already have max # RPC records queued on this socket */
			ns_flag = SLP_NEEDQ;
			goto dorecs;
		}

		bzero(&msg, sizeof(msg));
		msg.msg_name = (caddr_t)&nam;
		msg.msg_namelen = sizeof(nam);

		/* Drain all pending datagrams, queueing each one as an RPC record. */
		do {
			bytes_read = 1000000000;
			error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
			if (mp) {
				/* Prepend the sender's address as a SONAME mbuf, if we can get one. */
				if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
					mbuf_setlen(mhck, nam.ss_len);
					bcopy(&nam, mbuf_data(mhck), nam.ss_len);
					m = mhck;
					if (mbuf_setnext(m, mp)) {
						/* trouble... just drop it */
						printf("nfsrv_rcv: mbuf_setnext failed\n");
						mbuf_free(mhck);
						m = mp;
					}
				} else {
					m = mp;
				}
				/* Append the record to the socket's record queue (packet-linked). */
				if (slp->ns_recend) {
					mbuf_setnextpkt(slp->ns_recend, m);
				} else {
					slp->ns_rec = m;
					slp->ns_flag |= SLP_DOREC;
				}
				slp->ns_recend = m;
				mbuf_setnextpkt(m, NULL);
				slp->ns_reccnt++;
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (ns_flag) {
		slp->ns_flag |= ns_flag;
	}
	if (waitflag == MBUF_DONTWAIT) {
		/* Upcall path: drop the lock, then wake an nfsd if there's work. */
		int wake = (slp->ns_flag & SLP_WORKTODO);
		lck_rw_done(&slp->ns_rwlock);
		if (wake && nfsd_thread_count) {
			lck_mtx_lock(nfsd_mutex);
			nfsrv_wakenfsd(slp);
			lck_mtx_unlock(nfsd_mutex);
		}
	}
}
6646
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 *
 * Consumes data from slp->ns_raw (ns_cc bytes), using the 4-byte RPC
 * record mark (high bit = last fragment, low 31 bits = fragment length)
 * to split the stream into records, which are queued on slp->ns_rec.
 * Returns 0 when out of data, EPERM on a bogus record length, or
 * EWOULDBLOCK on an mbuf operation failure.
 */
int
nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
{
	mbuf_t m;
	char *cp1, *cp2, *mdata;
	int len, mlen, error;
	mbuf_t om, m2, recm;
	u_int32_t recmark;

	/* Reentrancy guard: only one thread may parse this stream at a time. */
	if (slp->ns_flag & SLP_GETSTREAM) {
		panic("nfs getstream");
	}
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			/* Need a new record mark; it takes NFSX_UNSIGNED (4) bytes. */
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return 0;
			}
			m = slp->ns_raw;
			mdata = mbuf_data(m);
			mlen = mbuf_len(m);
			if (mlen >= NFSX_UNSIGNED) {
				/* Record mark is contiguous in the first mbuf. */
				bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
				mdata += NFSX_UNSIGNED;
				mlen -= NFSX_UNSIGNED;
				mbuf_setdata(m, mdata, mlen);
			} else {
				/* Record mark straddles mbufs: gather it byte by byte. */
				cp1 = (caddr_t)&recmark;
				cp2 = mdata;
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (mlen == 0) {
						m = mbuf_next(m);
						cp2 = mbuf_data(m);
						mlen = mbuf_len(m);
					}
					*cp1++ = *cp2++;
					mlen--;
					mbuf_setdata(m, cp2, mlen);
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			/* Low 31 bits are the fragment length; high bit marks the last fragment. */
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000) {
				slp->ns_flag |= SLP_LASTFRAG;
			} else {
				slp->ns_flag &= ~SLP_LASTFRAG;
			}
			if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
				/* Bogus length: caller treats EPERM as a disconnect. */
				slp->ns_flag &= ~SLP_GETSTREAM;
				return EPERM;
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			/* Exactly one fragment buffered: take the whole raw chain. */
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			/* More than one fragment buffered: split the chain at reclen bytes. */
			len = 0;
			m = slp->ns_raw;
			mlen = mbuf_len(m);
			mdata = mbuf_data(m);
			om = NULL;
			while (len < slp->ns_reclen) {
				if ((len + mlen) > slp->ns_reclen) {
					/* Fragment ends mid-mbuf: copy the head, advance the data ptr. */
					if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					if (om) {
						if (mbuf_setnext(om, m2)) {
							/* trouble... just drop it */
							printf("nfsrv_getstream: mbuf_setnext failed\n");
							mbuf_freem(m2);
							slp->ns_flag &= ~SLP_GETSTREAM;
							return EWOULDBLOCK;
						}
						recm = slp->ns_raw;
					} else {
						recm = m2;
					}
					mdata += slp->ns_reclen - len;
					mlen -= slp->ns_reclen - len;
					mbuf_setdata(m, mdata, mlen);
					len = slp->ns_reclen;
				} else if ((len + mlen) == slp->ns_reclen) {
					/* Fragment ends exactly at an mbuf boundary: detach here. */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					recm = slp->ns_raw;
					if (mbuf_setnext(om, NULL)) {
						printf("nfsrv_getstream: mbuf_setnext failed 2\n");
						slp->ns_flag &= ~SLP_GETSTREAM;
						return EWOULDBLOCK;
					}
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				} else {
					/* Whole mbuf belongs to this fragment: keep walking. */
					om = m;
					len += mlen;
					m = mbuf_next(m);
					mlen = mbuf_len(m);
					mdata = mbuf_data(m);
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			/* Fragment not fully buffered yet: wait for more data. */
			slp->ns_flag &= ~SLP_GETSTREAM;
			return 0;
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		if (slp->ns_frag == NULL) {
			slp->ns_frag = recm;
		} else {
			m = slp->ns_frag;
			while ((m2 = mbuf_next(m))) {
				m = m2;
			}
			if ((error = mbuf_setnext(m, recm))) {
				panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
			}
		}
		if (slp->ns_flag & SLP_LASTFRAG) {
			/* Record complete: move it onto the record queue (packet-linked). */
			if (slp->ns_recend) {
				mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
			} else {
				slp->ns_rec = slp->ns_frag;
				slp->ns_flag |= SLP_DOREC;
			}
			slp->ns_recend = slp->ns_frag;
			slp->ns_frag = NULL;
		}
	}
}
6800
/*
 * Parse an RPC header.
 *
 * Dequeue the next RPC record from slp->ns_rec, allocate a request
 * descriptor for it, split off the (optional) sender-address mbuf,
 * and parse the RPC header via nfsrv_getreq().  On success returns 0
 * with the new descriptor in *ndp (also recorded as nfsd->nfsd_nd);
 * returns ENOBUFS when there is no record to process.
 */
int
nfsrv_dorec(
	struct nfsrv_sock *slp,
	struct nfsd *nfsd,
	struct nfsrv_descript **ndp)
{
	mbuf_t m;
	mbuf_t nam;
	struct nfsrv_descript *nd;
	int error = 0;

	*ndp = NULL;
	if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
		return ENOBUFS;
	}
	MALLOC_ZONE(nd, struct nfsrv_descript *,
	    sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
	if (!nd) {
		return ENOMEM;
	}
	/* Unlink the first record from the queue. */
	m = slp->ns_rec;
	slp->ns_rec = mbuf_nextpkt(m);
	if (slp->ns_rec) {
		mbuf_setnextpkt(m, NULL);
	} else {
		/* queue drained: clear the work flag */
		slp->ns_flag &= ~SLP_DOREC;
		slp->ns_recend = NULL;
	}
	slp->ns_reccnt--;
	/* A leading SONAME mbuf carries the sender's address (datagram sockets). */
	if (mbuf_type(m) == MBUF_TYPE_SONAME) {
		nam = m;
		m = mbuf_next(m);
		if ((error = mbuf_setnext(nam, NULL))) {
			panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
		}
	} else {
		nam = NULL;
	}
	nd->nd_nam2 = nam;
	nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
	if (!error) {
		error = nfsrv_getreq(nd);
	}
	if (error) {
		/* Parsing failed: release everything we grabbed for this request. */
		if (nam) {
			mbuf_freem(nam);
		}
		if (nd->nd_gss_context) {
			nfs_gss_svc_ctx_deref(nd->nd_gss_context);
		}
		FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC);
		return error;
	}
	nd->nd_mrep = NULL;
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return 0;
}
6862
/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 *
 * Most protocol-level problems are reported by setting nd->nd_repstat
 * and nd->nd_procnum = NFSPROC_NOOP and returning 0, so the caller
 * still sends an RPC-level error reply; a non-zero return means the
 * request is dropped entirely.
 */
int
nfsrv_getreq(struct nfsrv_descript *nd)
{
	struct nfsm_chain *nmreq;
	int len, i;
	u_int32_t nfsvers, auth_type;
	int error = 0;
	uid_t user_id;
	gid_t group_id;
	int ngroups;
	uint32_t val;

	nd->nd_cr = NULL;
	nd->nd_gss_context = NULL;
	nd->nd_gss_seqnum = 0;
	nd->nd_gss_mb = NULL;

	/* -2 == "nobody" default until real ids are parsed from the wire */
	user_id = group_id = -2;
	val = auth_type = len = 0;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
	nfsm_chain_get_32(error, nmreq, val); // RPC Call
	if (!error && (val != RPC_CALL)) {
		error = EBADRPC;
	}
	nfsmout_if(error);
	nd->nd_repstat = 0;
	nfsm_chain_get_32(error, nmreq, val); // RPC Version
	nfsmout_if(error);
	if (val != RPC_VER2) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
	nfsmout_if(error);
	if (val != NFS_PROG) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number
	nfsmout_if(error);
	if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	nd->nd_vers = nfsvers;
	nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number
	nfsmout_if(error);
	if ((nd->nd_procnum >= NFS_NPROCS) ||
	    ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	/* Map NFSv2 procedure numbers onto the v3 procedure space. */
	if (nfsvers != NFS_VER3) {
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	}
	nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
	nfsm_chain_get_32(error, nmreq, len); // Auth Length
	if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
		error = EBADRPC;
	}
	nfsmout_if(error);

	/* Handle authentication */
	if (auth_type == RPCAUTH_SYS) {
		struct posix_cred temp_pcred;
		/* NULL proc needs no credentials */
		if (nd->nd_procnum == NFSPROC_NULL) {
			return 0;
		}
		nd->nd_sec = RPCAUTH_SYS;
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
		nfsm_chain_get_32(error, nmreq, len); // hostname length
		if (len < 0 || len > NFS_MAXNAMLEN) {
			error = EBADRPC;
		}
		nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
		nfsmout_if(error);

		/* create a temporary credential using the bits from the wire */
		bzero(&temp_pcred, sizeof(temp_pcred));
		nfsm_chain_get_32(error, nmreq, user_id);
		nfsm_chain_get_32(error, nmreq, group_id);
		temp_pcred.cr_groups[0] = group_id;
		nfsm_chain_get_32(error, nmreq, len); // extra GID count
		if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
			error = EBADRPC;
		}
		nfsmout_if(error);
		/* Keep the first NGROUPS-1 extra gids; skip over the rest. */
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS) {
				nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
			} else {
				nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
			}
		}
		nfsmout_if(error);
		ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (ngroups > 1) {
			nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
		}
		nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
		nfsm_chain_get_32(error, nmreq, len); // verifier length
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			error = EBADRPC;
		}
		if (len > 0) {
			nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
		}

		/* request creation of a real credential */
		temp_pcred.cr_uid = user_id;
		temp_pcred.cr_ngroups = ngroups;
		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			nd->nd_repstat = ENOMEM;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else if (auth_type == RPCSEC_GSS) {
		error = nfs_gss_svc_cred_get(nd, nmreq);
		if (error) {
			if (error == EINVAL) {
				goto nfsmout; // drop the request
			}
			nd->nd_repstat = error;
			nd->nd_procnum = NFSPROC_NOOP;
			return 0;
		}
	} else {
		if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
			return 0;
		}
		/* unsupported auth flavor: reject the credential */
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return 0;
	}
	return 0;
nfsmout:
	/* Header parsing failed: release any credential and the request chain. */
	if (IS_VALID_CRED(nd->nd_cr)) {
		kauth_cred_unref(&nd->nd_cr);
	}
	nfsm_chain_cleanup(nmreq);
	return error;
}
7017
7018 /*
7019 * Search for a sleeping nfsd and wake it up.
7020 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7021 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7022 * Note: Must be called with nfsd_mutex held.
7023 */
7024 void
7025 nfsrv_wakenfsd(struct nfsrv_sock *slp)
7026 {
7027 struct nfsd *nd;
7028
7029 if ((slp->ns_flag & SLP_VALID) == 0) {
7030 return;
7031 }
7032
7033 lck_rw_lock_exclusive(&slp->ns_rwlock);
7034 /* if there's work to do on this socket, make sure it's queued up */
7035 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7036 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7037 slp->ns_flag |= SLP_WAITQ;
7038 }
7039 lck_rw_done(&slp->ns_rwlock);
7040
7041 /* wake up a waiting nfsd, if possible */
7042 nd = TAILQ_FIRST(&nfsd_queue);
7043 if (!nd) {
7044 return;
7045 }
7046
7047 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7048 nd->nfsd_flag &= ~NFSD_WAITING;
7049 wakeup(nd);
7050 }
7051
7052 #endif /* CONFIG_NFS_SERVER */
7053
7054 #endif /* CONFIG_NFS */