1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1991, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
65 * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS
70
71 /*
72 * Socket operations for use by nfs
73 */
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/proc.h>
78 #include <sys/signalvar.h>
79 #include <sys/kauth.h>
80 #include <sys/mount_internal.h>
81 #include <sys/kernel.h>
82 #include <sys/kpi_mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/vnode.h>
85 #include <sys/domain.h>
86 #include <sys/protosw.h>
87 #include <sys/socket.h>
88 #include <sys/un.h>
89 #include <sys/syslog.h>
90 #include <sys/tprintf.h>
91 #include <libkern/OSAtomic.h>
92
93 #include <sys/reboot.h>
94 #include <sys/time.h>
95 #include <kern/clock.h>
96 #include <kern/task.h>
97 #include <kern/thread.h>
98 #include <kern/thread_call.h>
99 #include <sys/user.h>
100 #include <sys/acct.h>
101
102 #include <netinet/in.h>
103 #include <netinet/tcp.h>
104
105 #include <nfs/rpcv2.h>
106 #include <nfs/krpc.h>
107 #include <nfs/nfsproto.h>
108 #include <nfs/nfs.h>
109 #include <nfs/xdr_subs.h>
110 #include <nfs/nfsm_subs.h>
111 #include <nfs/nfs_gss.h>
112 #include <nfs/nfsmount.h>
113 #include <nfs/nfsnode.h>
114
115 #define NFS_SOCK_DBG(...) NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__)
116 #define NFS_SOCK_DUMP_MBUF(msg, mb) if (NFS_IS_DBG(NFS_FAC_SOCK, 15)) nfs_dump_mbuf(__func__, __LINE__, (msg), (mb))
117
118 #ifndef SUN_LEN
119 #define SUN_LEN(su) \
120 (sizeof(*(su)) - sizeof((su)->sun_path) + strnlen((su)->sun_path, sizeof((su)->sun_path)))
121 #endif /* SUN_LEN */
122
123 /* XXX */
124 boolean_t current_thread_aborted(void);
125 kern_return_t thread_terminate(thread_t);
126
127 ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE);
128 ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE);
129 ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc",
130 sizeof(struct nfsrv_descript), ZC_NONE);
131
132
133 #if CONFIG_NFS_SERVER
134 int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */
135
136 int nfsrv_getstream(struct nfsrv_sock *, int);
137 int nfsrv_getreq(struct nfsrv_descript *);
138 extern int nfsv3_procid[NFS_NPROCS];
139 #endif /* CONFIG_NFS_SERVER */
140
141 /*
142 * compare two sockaddr structures
143 */
144 int
145 nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2)
146 {
147 if (!sa1) {
148 return -1;
149 }
150 if (!sa2) {
151 return 1;
152 }
153 if (sa1->sa_family != sa2->sa_family) {
154 return (sa1->sa_family < sa2->sa_family) ? -1 : 1;
155 }
156 if (sa1->sa_len != sa2->sa_len) {
157 return (sa1->sa_len < sa2->sa_len) ? -1 : 1;
158 }
159 if (sa1->sa_family == AF_INET) {
160 return bcmp(&((struct sockaddr_in*)sa1)->sin_addr,
161 &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr));
162 }
163 if (sa1->sa_family == AF_INET6) {
164 return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr,
165 &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr));
166 }
167 return -1;
168 }
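/*
 * Example only (not part of the original source): the comparator above
 * yields a total order, so a return of 0 means "same address". Note that
 * only the address itself is compared; ports are ignored.
 */
#if 0
	if (nfs_sockaddr_cmp(sa1, sa2) == 0) {
		/* sa1 and sa2 have the same family, length, and IPv4/IPv6 address */
	}
#endif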
169
170 #if CONFIG_NFS_CLIENT
171
172 int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
173 int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int);
174 int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *);
175 void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *);
176 void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
177 int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *);
178 int nfs_reconnect(struct nfsmount *);
179 int nfs_connect_setup(struct nfsmount *);
180 void nfs_mount_sock_thread(void *, wait_result_t);
181 void nfs_udp_rcv(socket_t, void*, int);
182 void nfs_tcp_rcv(socket_t, void*, int);
183 void nfs_sock_poke(struct nfsmount *);
184 void nfs_request_match_reply(struct nfsmount *, mbuf_t);
185 void nfs_reqdequeue(struct nfsreq *);
186 void nfs_reqbusy(struct nfsreq *);
187 struct nfsreq *nfs_reqnext(struct nfsreq *);
188 int nfs_wait_reply(struct nfsreq *);
189 void nfs_softterm(struct nfsreq *);
190 int nfs_can_squish(struct nfsmount *);
191 int nfs_is_squishy(struct nfsmount *);
192 int nfs_is_dead(int, struct nfsmount *);
193
194 /*
195 * Estimate rto for an nfs rpc sent via an unreliable datagram.
196 * Use the mean and mean deviation of rtt for the appropriate type of rpc
197 * for the frequent rpcs and a default for the others.
198 * The justification for doing "other" this way is that these rpcs
199 * happen so infrequently that timer estimates would probably be stale.
200 * Also, since many of these rpcs are
201 * non-idempotent, a conservative timeout is desired.
202 * getattr, lookup - A+2D
203 * read, write - A+4D
204 * other - nm_timeo
205 */
206 #define NFS_RTO(n, t) \
207 ((t) == 0 ? (n)->nm_timeo : \
208 ((t) < 3 ? \
209 (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
210 ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
211 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
212 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
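/*
 * Illustrative sketch (example only, not in the original source): how the
 * NFS_RTO macro above evaluates for the different timer classes.
 */
#if 0
static int
nfs_rto_example(struct nfsmount *nmp)
{
	int rto_getattr = NFS_RTO(nmp, 1); /* getattr/lookup: ((srtt/4) + sdrtt + 1) / 2 */
	int rto_read    = NFS_RTO(nmp, 3); /* read/write: (srtt/8) + sdrtt + 1 */
	int rto_other   = NFS_RTO(nmp, 0); /* everything else: the mount's nm_timeo */
	return rto_getattr + rto_read + rto_other;
}
#endif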
213
214 /*
215 * Defines which timer to use for the procnum.
216 * 0 - default
217 * 1 - getattr
218 * 2 - lookup
219 * 3 - read
220 * 4 - write
221 */
222 static const int proct[] = {
223 [NFSPROC_NULL] = 0,
224 [NFSPROC_GETATTR] = 1,
225 [NFSPROC_SETATTR] = 0,
226 [NFSPROC_LOOKUP] = 2,
227 [NFSPROC_ACCESS] = 1,
228 [NFSPROC_READLINK] = 3,
229 [NFSPROC_READ] = 3,
230 [NFSPROC_WRITE] = 4,
231 [NFSPROC_CREATE] = 0,
232 [NFSPROC_MKDIR] = 0,
233 [NFSPROC_SYMLINK] = 0,
234 [NFSPROC_MKNOD] = 0,
235 [NFSPROC_REMOVE] = 0,
236 [NFSPROC_RMDIR] = 0,
237 [NFSPROC_RENAME] = 0,
238 [NFSPROC_LINK] = 0,
239 [NFSPROC_READDIR] = 3,
240 [NFSPROC_READDIRPLUS] = 3,
241 [NFSPROC_FSSTAT] = 0,
242 [NFSPROC_FSINFO] = 0,
243 [NFSPROC_PATHCONF] = 0,
244 [NFSPROC_COMMIT] = 0,
245 [NFSPROC_NOOP] = 0,
246 };
247
248 /*
249 * There is a congestion window for outstanding rpcs maintained per mount
250 * point. The cwnd size is adjusted in roughly the way that:
251 * Van Jacobson, "Congestion Avoidance and Control", in Proceedings of
252 * SIGCOMM '88, ACM, August 1988.
253 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
254 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
255 * of rpcs is in progress.
256 * (The sent count and cwnd are scaled for integer arith.)
257 * Variants of "slow start" were tried and were found to be too much of a
258 * performance hit (ave. rtt 3 times larger),
259 * I suspect due to the large rtt that nfs rpcs have.
260 */
261 #define NFS_CWNDSCALE 256
262 #define NFS_MAXCWND (NFS_CWNDSCALE * 32)
263 static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
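/*
 * Sketch of the congestion window adjustments described above (example
 * only; the real updates live in the request send/reply paths). nm_sent
 * and nm_cwnd are assumed to be the mount's scaled outstanding-request
 * count and window.
 */
#if 0
	/* on a retransmit timeout: chop the window in half */
	nmp->nm_cwnd = MAX(NFS_CWNDSCALE, nmp->nm_cwnd / 2);

	/* on an rpc reply with a full window of rpcs in progress: grow by ~1/cwnd */
	if ((nmp->nm_sent >= nmp->nm_cwnd) && (nmp->nm_cwnd < NFS_MAXCWND)) {
		nmp->nm_cwnd += (NFS_CWNDSCALE * NFS_CWNDSCALE) / nmp->nm_cwnd;
	}
#endif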
264
265 /*
266 * Increment location index to next address/server/location.
267 */
268 void
269 nfs_location_next(struct nfs_fs_locations *nlp, struct nfs_location_index *nlip)
270 {
271 uint8_t loc = nlip->nli_loc;
272 uint8_t serv = nlip->nli_serv;
273 uint8_t addr = nlip->nli_addr;
274
275 /* move to next address */
276 addr++;
277 if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
278 /* no more addresses on current server, go to first address of next server */
279 next_server:
280 addr = 0;
281 serv++;
282 if (serv >= nlp->nl_locations[loc]->nl_servcount) {
283 /* no more servers on current location, go to first server of next location */
284 serv = 0;
285 loc++;
286 if (loc >= nlp->nl_numlocs) {
287 loc = 0; /* after last location, wrap back around to first location */
288 }
289 }
290 }
291 /*
292 * It's possible for this next server to not have any addresses.
293 * Check for that here and go to the next server.
294 * But bail out if we've managed to come back around to the original
295 * location that was passed in. (That would mean no servers had any
296 * addresses. And we don't want to spin here forever.)
297 */
298 if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) {
299 return;
300 }
301 if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) {
302 goto next_server;
303 }
304
305 nlip->nli_loc = loc;
306 nlip->nli_serv = serv;
307 nlip->nli_addr = addr;
308 }
309
310 /*
311 * Compare two location indices.
312 */
313 int
314 nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2)
315 {
316 if (nlip1->nli_loc != nlip2->nli_loc) {
317 return nlip1->nli_loc - nlip2->nli_loc;
318 }
319 if (nlip1->nli_serv != nlip2->nli_serv) {
320 return nlip1->nli_serv - nlip2->nli_serv;
321 }
322 return nlip1->nli_addr - nlip2->nli_addr;
323 }
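/*
 * Example only: nfs_location_next() and nfs_location_index_cmp() are
 * typically used together to walk every location/server/address once,
 * stopping when the index wraps back around to the starting point.
 */
#if 0
	struct nfs_location_index idx = start; /* 'start' is assumed to be a valid index */
	do {
		/* ... try the address at idx ... */
		nfs_location_next(nlp, &idx);
	} while (nfs_location_index_cmp(&idx, &start) != 0);
#endif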
324
325 /*
326 * Get the mntfromname (or path portion only) for a given location.
327 */
328 void
329 nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly)
330 {
331 struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc];
332 char *p;
333 int cnt, i;
334
335 p = s;
336 if (!pathonly) {
337 char *name = fsl->nl_servers[idx.nli_serv]->ns_name;
338 if (name == NULL) {
339 name = "";
340 }
341 if (*name == '\0') {
342 if (*fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr]) {
343 name = fsl->nl_servers[idx.nli_serv]->ns_addresses[idx.nli_addr];
344 }
345 cnt = scnprintf(p, size, "<%s>:", name);
346 } else {
347 cnt = scnprintf(p, size, "%s:", name);
348 }
349 p += cnt;
350 size -= cnt;
351 }
352 if (fsl->nl_path.np_compcount == 0) {
353 /* mounting root export on server */
354 if (size > 1) {
355 *p++ = '/';
356 *p++ = '\0';
357 }
358 return;
359 }
360 /* append each server path component */
361 for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) {
362 cnt = scnprintf(p, size, "/%s", fsl->nl_path.np_components[i]);
363 p += cnt;
364 size -= cnt;
365 }
366 }
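/*
 * Example only: format the current location as "server:/export/path".
 * Passing pathonly != 0 yields just the path portion, as the MOUNT RPC
 * setup later in this file does.
 */
#if 0
	char mntfrom[MAXPATHLEN];
	nfs_location_mntfromname(&nmp->nm_locations, nmp->nm_locations.nl_current,
	    mntfrom, sizeof(mntfrom), 0);
#endif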
367
368 /*
369 * NFS client connect socket upcall.
370 * (Used only during socket connect/search.)
371 */
372 void
373 nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag)
374 {
375 struct nfs_socket *nso = arg;
376 size_t rcvlen;
377 mbuf_t m;
378 int error = 0, recv = 1;
379
380 if (nso->nso_flags & NSO_CONNECTING) {
381 NFS_SOCK_DBG("nfs connect - socket %p upcall - connecting flags = %8.8x\n", nso, nso->nso_flags);
382 wakeup(nso->nso_wake);
383 return;
384 }
385
386 lck_mtx_lock(&nso->nso_lock);
387 if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) {
388 NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso);
389 lck_mtx_unlock(&nso->nso_lock);
390 return;
391 }
392 NFS_SOCK_DBG("nfs connect - socket %p upcall %8.8x\n", nso, nso->nso_flags);
393 nso->nso_flags |= NSO_UPCALL;
394
395 /* loop while we make error-free progress */
396 while (!error && recv) {
397 /* make sure we're still interested in this socket */
398 if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) {
399 break;
400 }
401 lck_mtx_unlock(&nso->nso_lock);
402 m = NULL;
403 if (nso->nso_sotype == SOCK_STREAM) {
404 error = nfs_rpc_record_read(so, &nso->nso_rrs, MSG_DONTWAIT, &recv, &m);
405 NFS_SOCK_DBG("nfs_rpc_record_read returned %d recv = %d\n", error, recv);
406 } else {
407 rcvlen = 1000000;
408 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
409 recv = m ? 1 : 0;
410 }
411 lck_mtx_lock(&nso->nso_lock);
412 if (m) {
413 /* match response with request */
414 struct nfsm_chain nmrep;
415 uint32_t reply = 0, rxid = 0, verf_type, verf_len;
416 uint32_t reply_status, rejected_status, accepted_status;
417
418 NFS_SOCK_DUMP_MBUF("Got mbuf from ping", m);
419 nfsm_chain_dissect_init(error, &nmrep, m);
420 nfsm_chain_get_32(error, &nmrep, rxid);
421 nfsm_chain_get_32(error, &nmrep, reply);
422 if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) {
423 error = EBADRPC;
424 }
425 nfsm_chain_get_32(error, &nmrep, reply_status);
426 if (!error && (reply_status == RPC_MSGDENIED)) {
427 nfsm_chain_get_32(error, &nmrep, rejected_status);
428 if (!error) {
429 error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
430 }
431 }
432 nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */
433 nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */
434 nfsmout_if(error);
435 if (verf_len) {
436 nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
437 }
438 nfsm_chain_get_32(error, &nmrep, accepted_status);
439 nfsmout_if(error);
440 NFS_SOCK_DBG("Recevied accepted_status of %d nso_version = %d\n", accepted_status, nso->nso_version);
441 if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) {
442 uint32_t minvers, maxvers;
443 nfsm_chain_get_32(error, &nmrep, minvers);
444 nfsm_chain_get_32(error, &nmrep, maxvers);
445 nfsmout_if(error);
446 if (nso->nso_protocol == PMAPPROG) {
447 if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) {
448 error = EPROGMISMATCH;
449 } else if ((nso->nso_saddr->sa_family == AF_INET) &&
450 (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) {
451 nso->nso_version = PMAPVERS;
452 } else if (nso->nso_saddr->sa_family == AF_INET6) {
453 if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) {
454 nso->nso_version = RPCBVERS4;
455 } else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) {
456 nso->nso_version = RPCBVERS3;
457 }
458 }
459 } else if (nso->nso_protocol == NFS_PROG) {
460 int vers;
461
462 /*
463 * N.B. Both portmapper and rpcbind V3 are happy to return
464 * addresses for versions other than the one you ask for (getport or
465 * getaddr), and thus we may have fallen to this code path. So if
466 * we get a version that we support, use the highest supported
467 * version. This assumes that the server supports all versions
468 * between minvers and maxvers. Note that for IPv6 we will try to
469 * use rpcbind V4, which has getversaddr, and we should not get
470 * here if that was successful.
471 */
472 for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) {
473 if (vers >= (int)minvers && vers <= (int)maxvers) {
474 break;
475 }
476 }
477 nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers;
478 }
479 if (!error && nso->nso_version) {
480 accepted_status = RPC_SUCCESS;
481 }
482 }
483 if (!error) {
484 switch (accepted_status) {
485 case RPC_SUCCESS:
486 error = 0;
487 break;
488 case RPC_PROGUNAVAIL:
489 error = EPROGUNAVAIL;
490 break;
491 case RPC_PROGMISMATCH:
492 error = EPROGMISMATCH;
493 break;
494 case RPC_PROCUNAVAIL:
495 error = EPROCUNAVAIL;
496 break;
497 case RPC_GARBAGE:
498 error = EBADRPC;
499 break;
500 case RPC_SYSTEM_ERR:
501 default:
502 error = EIO;
503 break;
504 }
505 }
506 nfsmout:
507 nso->nso_flags &= ~NSO_PINGING;
508 if (error) {
509 NFS_SOCK_DBG("nfs upcalled failed for %d program %d vers error = %d\n",
510 nso->nso_protocol, nso->nso_version, error);
511 nso->nso_error = error;
512 nso->nso_flags |= NSO_DEAD;
513 } else {
514 nso->nso_flags |= NSO_VERIFIED;
515 }
516 mbuf_freem(m);
517 /* wake up search thread */
518 wakeup(nso->nso_wake);
519 break;
520 }
521 }
522
523 nso->nso_flags &= ~NSO_UPCALL;
524 if ((error != EWOULDBLOCK) && (error || !recv)) {
525 /* problems with the socket... */
526 NFS_SOCK_DBG("connect upcall failed %d\n", error);
527 nso->nso_error = error ? error : EPIPE;
528 nso->nso_flags |= NSO_DEAD;
529 wakeup(nso->nso_wake);
530 }
531 if (nso->nso_flags & NSO_DISCONNECTING) {
532 wakeup(&nso->nso_flags);
533 }
534 lck_mtx_unlock(&nso->nso_lock);
535 }
536
537 /*
538 * Create/initialize an nfs_socket structure.
539 */
540 int
541 nfs_socket_create(
542 struct nfsmount *nmp,
543 struct sockaddr *sa,
544 uint8_t sotype,
545 in_port_t port,
546 uint32_t protocol,
547 uint32_t vers,
548 int resvport,
549 struct nfs_socket **nsop)
550 {
551 struct nfs_socket *nso;
552 struct timeval now;
553 int error;
554 #define NFS_SOCKET_DEBUGGING
555 #ifdef NFS_SOCKET_DEBUGGING
556 char naddr[sizeof((struct sockaddr_un *)0)->sun_path];
557 void *sinaddr;
558
559 switch (sa->sa_family) {
560 case AF_INET:
561 if (sa->sa_len != sizeof(struct sockaddr_in)) {
562 return EINVAL;
563 }
564 sinaddr = &((struct sockaddr_in*)sa)->sin_addr;
565 if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
566 strlcpy(naddr, "<unknown>", sizeof(naddr));
567 }
568 break;
569 case AF_INET6:
570 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
571 return EINVAL;
572 }
573 sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr;
574 if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) {
575 strlcpy(naddr, "<unknown>", sizeof(naddr));
576 }
577 break;
578 case AF_LOCAL:
579 if (sa->sa_len != sizeof(struct sockaddr_un) && sa->sa_len != SUN_LEN((struct sockaddr_un *)sa)) {
580 return EINVAL;
581 }
582 strlcpy(naddr, ((struct sockaddr_un *)sa)->sun_path, sizeof(naddr));
583 break;
584 default:
585 strlcpy(naddr, "<unsupported address family>", sizeof(naddr));
586 break;
587 }
588 #else
589 char naddr[1] = { 0 };
590 #endif
591
592 *nsop = NULL;
593
594 /* Create the socket. */
595 MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO);
596 if (nso) {
597 MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO);
598 }
599 if (!nso || !nso->nso_saddr) {
600 if (nso) {
601 FREE(nso, M_TEMP);
602 }
603 return ENOMEM;
604 }
605 lck_mtx_init(&nso->nso_lock, &nfs_request_grp, LCK_ATTR_NULL);
606 nso->nso_sotype = sotype;
607 if (nso->nso_sotype == SOCK_STREAM) {
608 nfs_rpc_record_state_init(&nso->nso_rrs);
609 }
610 microuptime(&now);
611 nso->nso_timestamp = now.tv_sec;
612 bcopy(sa, nso->nso_saddr, sa->sa_len);
613 switch (sa->sa_family) {
614 case AF_INET:
615 case AF_INET6:
616 if (sa->sa_family == AF_INET) {
617 ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port);
618 } else if (sa->sa_family == AF_INET6) {
619 ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port);
620 }
621 break;
622 case AF_LOCAL:
623 break;
624 }
625 nso->nso_protocol = protocol;
626 nso->nso_version = vers;
627 nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers);
628 nso->nso_nfs_max_vers = PVER2MAJOR(nmp->nm_max_vers);
629
630 error = sock_socket(sa->sa_family, nso->nso_sotype, 0, NULL, NULL, &nso->nso_so);
631
632 /* Some servers require that the client port be a reserved port number. */
633 if (!error && resvport && ((sa->sa_family == AF_INET) || (sa->sa_family == AF_INET6))) {
634 struct sockaddr_storage ss;
635 int level = (sa->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
636 int optname = (sa->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
637 int portrange = IP_PORTRANGE_LOW;
638
639 error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange));
640 if (!error) { /* bind now to check for failure */
641 ss.ss_len = sa->sa_len;
642 ss.ss_family = sa->sa_family;
643 if (ss.ss_family == AF_INET) {
644 ((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
645 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
646 } else if (ss.ss_family == AF_INET6) {
647 ((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
648 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
649 } else {
650 error = EINVAL;
651 }
652 if (!error) {
653 error = sock_bind(nso->nso_so, (struct sockaddr*)&ss);
654 }
655 }
656 }
657
658 if (error) {
659 NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n",
660 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype,
661 resvport ? "r" : "", port, protocol, vers);
662 nfs_socket_destroy(nso);
663 } else {
664 NFS_SOCK_DBG("nfs connect %s created socket %p <%s> type %d%s port %d prot %d %d\n",
665 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr,
666 sotype, resvport ? "r" : "", port, protocol, vers);
667 *nsop = nso;
668 }
669 return error;
670 }
671
672 /*
673 * Destroy an nfs_socket structure.
674 */
675 void
676 nfs_socket_destroy(struct nfs_socket *nso)
677 {
678 struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };
679
680 NFS_SOCK_DBG("Destoring socket %p flags = %8.8x error = %d\n", nso, nso->nso_flags, nso->nso_error);
681 lck_mtx_lock(&nso->nso_lock);
682 nso->nso_flags |= NSO_DISCONNECTING;
683 if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */
684 msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts);
685 }
686 lck_mtx_unlock(&nso->nso_lock);
687 sock_shutdown(nso->nso_so, SHUT_RDWR);
688 sock_close(nso->nso_so);
689 if (nso->nso_sotype == SOCK_STREAM) {
690 nfs_rpc_record_state_cleanup(&nso->nso_rrs);
691 }
692 lck_mtx_destroy(&nso->nso_lock, &nfs_request_grp);
693 if (nso->nso_saddr) {
694 FREE(nso->nso_saddr, M_SONAME);
695 }
696 if (nso->nso_saddr2) {
697 FREE(nso->nso_saddr2, M_SONAME);
698 }
699 NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso);
700 FREE(nso, M_TEMP);
701 }
702
703 /*
704 * Set common socket options on an nfs_socket.
705 */
706 void
707 nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso)
708 {
709 /*
710 * Set socket send/receive timeouts
711 * - Receive timeout shouldn't matter because most receives are performed
712 * non-blocking in the socket upcall.
713 * - Send timeout should allow us to react to a blocked socket.
714 * Soft mounts will want to abort sooner.
715 */
716 struct timeval timeo;
717 int on = 1, proto, reserve, error;
718
719 timeo.tv_usec = 0;
720 timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60;
721 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
722 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
723 if (nso->nso_sotype == SOCK_STREAM) {
724 /* Assume that SOCK_STREAM always requires a connection */
725 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
726 /* set nodelay for TCP */
727 sock_gettype(nso->nso_so, NULL, NULL, &proto);
728 if (proto == IPPROTO_TCP) {
729 sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
730 }
731 }
732
733 /* set socket buffer sizes for UDP/TCP */
734 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2);
735 {
736 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve));
737 }
738
739 if (error) {
740 log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve);
741 }
742
743 reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2);
744 error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve));
745 if (error) {
746 log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve);
747 }
748
749 /* set SO_NOADDRERR to detect network changes ASAP */
750 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
751 /* just playin' it safe with upcalls */
752 sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
753 /* socket should be interruptible if the mount is */
754 if (!NMFLAG(nmp, INTR)) {
755 sock_nointerrupt(nso->nso_so, 1);
756 }
757 }
758
759 /*
760 * Release resources held in an nfs_socket_search.
761 */
762 void
763 nfs_socket_search_cleanup(struct nfs_socket_search *nss)
764 {
765 struct nfs_socket *nso, *nsonext;
766
767 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
768 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
769 nss->nss_sockcnt--;
770 nfs_socket_destroy(nso);
771 }
772 if (nss->nss_sock) {
773 nfs_socket_destroy(nss->nss_sock);
774 nss->nss_sock = NULL;
775 }
776 }
777
778 /*
779 * Prefer returning certain errors over others.
780 * This function returns a ranking of the given error.
781 */
782 int
783 nfs_connect_error_class(int error)
784 {
785 switch (error) {
786 case 0:
787 return 0;
788 case ETIMEDOUT:
789 case EAGAIN:
790 return 1;
791 case EPIPE:
792 case EADDRNOTAVAIL:
793 case ENETDOWN:
794 case ENETUNREACH:
795 case ENETRESET:
796 case ECONNABORTED:
797 case ECONNRESET:
798 case EISCONN:
799 case ENOTCONN:
800 case ESHUTDOWN:
801 case ECONNREFUSED:
802 case EHOSTDOWN:
803 case EHOSTUNREACH:
804 return 2;
805 case ERPCMISMATCH:
806 case EPROCUNAVAIL:
807 case EPROGMISMATCH:
808 case EPROGUNAVAIL:
809 return 3;
810 case EBADRPC:
811 return 4;
812 default:
813 return 5;
814 }
815 }
816
817 /*
818 * Make sure a socket search returns the best error.
819 */
820 void
821 nfs_socket_search_update_error(struct nfs_socket_search *nss, int error)
822 {
823 if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) {
824 nss->nss_error = error;
825 }
826 }
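/*
 * Example only: with the ranking above, an RPC-level rejection outranks a
 * plain connection failure, so the search reports the most informative error.
 */
#if 0
	nfs_socket_search_update_error(&nss, ECONNREFUSED);  /* class 2, recorded */
	nfs_socket_search_update_error(&nss, EPROGMISMATCH); /* class 3, replaces it */
	nfs_socket_search_update_error(&nss, ETIMEDOUT);     /* class 1, ignored */
	/* nss.nss_error is now EPROGMISMATCH */
#endif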
827
828 /* nfs_connect_search_new_socket:
829 * Given a socket search structure for an nfs mount, try to find a new socket from the set of addresses specified
830 * by nss.
831 *
832 * nss_last is set to -1 at initialization to indicate the first time. It's set to -2 if an address was found but
833 * could not be used or if a socket timed out.
834 */
835 int
836 nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
837 {
838 struct nfs_fs_location *fsl;
839 struct nfs_fs_server *fss;
840 struct sockaddr_storage ss;
841 struct nfs_socket *nso;
842 char *addrstr;
843 int error = 0;
844
845
846 NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n",
847 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt);
848
849 /*
850 * while there are addresses and:
851 * we have no sockets or
852 * the last address failed and did not produce a socket (nss_last < 0) or
853 * it's been a while (2 seconds) and we have fewer than the max number of concurrent sockets to search (4),
854 * then attempt to create a socket with the current address.
855 */
856 while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) ||
857 ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) {
858 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
859 return EINTR;
860 }
861 /* Can we convert the address to a sockaddr? */
862 fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc];
863 fss = fsl->nl_servers[nss->nss_nextloc.nli_serv];
864 addrstr = fss->ns_addresses[nss->nss_nextloc.nli_addr];
865 NFS_SOCK_DBG("Trying address %s for program %d on port %d\n", addrstr, nss->nss_protocol, nss->nss_port);
866 if (*addrstr == '\0') {
867 /*
868 * We have an unspecified local domain address. We use the program to translate to
869 * a well-known local transport address. We only support PMAPPROG and NFS for this.
870 */
871 if (nss->nss_protocol == PMAPPROG) {
872 addrstr = (nss->nss_sotype == SOCK_DGRAM) ? RPCB_TICLTS_PATH : RPCB_TICOTSORD_PATH;
873 } else if (nss->nss_protocol == NFS_PROG) {
874 addrstr = nmp->nm_nfs_localport;
875 if (!addrstr || *addrstr == '\0') {
876 addrstr = (nss->nss_sotype == SOCK_DGRAM) ? NFS_TICLTS_PATH : NFS_TICOTSORD_PATH;
877 }
878 }
879 NFS_SOCK_DBG("Calling prog %d with <%s>\n", nss->nss_protocol, addrstr);
880 }
881 if (!nfs_uaddr2sockaddr(addrstr, (struct sockaddr*)&ss)) {
882 NFS_SOCK_DBG("Could not convert address %s to socket\n", addrstr);
883 nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
884 nss->nss_addrcnt -= 1;
885 nss->nss_last = -2;
886 continue;
887 }
888 /* Check that socket family is acceptable. */
889 if (nmp->nm_sofamily && (ss.ss_family != nmp->nm_sofamily)) {
890 NFS_SOCK_DBG("Skipping socket family %d, want mount family %d\n", ss.ss_family, nmp->nm_sofamily);
891 nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
892 nss->nss_addrcnt -= 1;
893 nss->nss_last = -2;
894 continue;
895 }
896
897 /* Create the socket. */
898 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype,
899 nss->nss_port, nss->nss_protocol, nss->nss_version,
900 ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso);
901 if (error) {
902 return error;
903 }
904
905 nso->nso_location = nss->nss_nextloc;
906 nso->nso_wake = nss;
907 error = sock_setupcall(nso->nso_so, nfs_connect_upcall, nso);
908 if (error) {
909 NFS_SOCK_DBG("sock_setupcall failed for socket %p setting nfs_connect_upcall error = %d\n", nso, error);
910 lck_mtx_lock(&nso->nso_lock);
911 nso->nso_error = error;
912 nso->nso_flags |= NSO_DEAD;
913 lck_mtx_unlock(&nso->nso_lock);
914 }
915
916 TAILQ_INSERT_TAIL(&nss->nss_socklist, nso, nso_link);
917 nss->nss_sockcnt++;
918 nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc);
919 nss->nss_addrcnt -= 1;
920
921 nss->nss_last = now->tv_sec;
922 }
923
924 if (nss->nss_addrcnt == 0 && nss->nss_last < 0) {
925 nss->nss_last = now->tv_sec;
926 }
927
928 return error;
929 }
930
931 /*
932 * nfs_connect_search_socket_connect: Connect an nfs socket nso for nfsmount nmp.
933 * If successful, set the socket options for the socket as required by the mount.
934 *
935 * Assumes: nso->nso_lock is held on entry and return.
936 */
937 int
938 nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose)
939 {
940 int error;
941
942 if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) {
943 /* no connection needed, just say it's already connected */
944 NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n",
945 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
946 nso->nso_flags |= NSO_CONNECTED;
947 nfs_socket_options(nmp, nso);
948 return 1; /* Socket is connected and setup */
949 } else if (!(nso->nso_flags & NSO_CONNECTING)) {
950 /* initiate the connection */
951 nso->nso_flags |= NSO_CONNECTING;
952 lck_mtx_unlock(&nso->nso_lock);
953 NFS_SOCK_DBG("nfs connect %s connecting socket %p %s\n",
954 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso,
955 nso->nso_saddr->sa_family == AF_LOCAL ? ((struct sockaddr_un*)nso->nso_saddr)->sun_path : "");
956 error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT);
957 if (error) {
958 NFS_SOCK_DBG("nfs connect %s connecting socket %p returned %d\n",
959 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
960 }
961 lck_mtx_lock(&nso->nso_lock);
962 if (error && (error != EINPROGRESS)) {
963 nso->nso_error = error;
964 nso->nso_flags |= NSO_DEAD;
965 return 0;
966 }
967 }
968 if (nso->nso_flags & NSO_CONNECTING) {
969 /* check the connection */
970 if (sock_isconnected(nso->nso_so)) {
971 NFS_SOCK_DBG("nfs connect %s socket %p is connected\n",
972 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
973 nso->nso_flags &= ~NSO_CONNECTING;
974 nso->nso_flags |= NSO_CONNECTED;
975 nfs_socket_options(nmp, nso);
976 return 1; /* Socket is connected and setup */
977 } else {
978 int optlen = sizeof(error);
979 error = 0;
980 sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen);
981 if (error) { /* we got an error on the socket */
982 NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n",
983 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
984 if (verbose) {
985 printf("nfs connect socket error %d for %s\n",
986 error, vfs_statfs(nmp->nm_mountp)->f_mntfromname);
987 }
988 nso->nso_error = error;
989 nso->nso_flags |= NSO_DEAD;
990 return 0;
991 }
992 }
993 }
994
995 return 0; /* Waiting to be connected */
996 }
997
998 /*
999 * nfs_connect_search_ping: Send a null proc on the nso socket.
1000 */
1001 int
1002 nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct timeval *now)
1003 {
1004 /* initiate a NULL RPC request */
1005 uint64_t xid = nso->nso_pingxid;
1006 mbuf_t m, mreq = NULL;
1007 struct msghdr msg;
1008 size_t reqlen, sentlen;
1009 uint32_t vers = nso->nso_version;
1010 int error;
1011
1012 if (!vers) {
1013 if (nso->nso_protocol == PMAPPROG) {
1014 vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1015 } else if (nso->nso_protocol == NFS_PROG) {
1016 vers = PVER2MAJOR(nmp->nm_max_vers);
1017 }
1018 }
1019 lck_mtx_unlock(&nso->nso_lock);
1020 NFS_SOCK_DBG("Pinging socket %p %d %d %d\n", nso, nso->nso_sotype, nso->nso_protocol, vers);
1021 error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS,
1022 vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq);
1023 lck_mtx_lock(&nso->nso_lock);
1024 if (!error) {
1025 nso->nso_flags |= NSO_PINGING;
1026 nso->nso_pingxid = R_XID32(xid);
1027 nso->nso_reqtimestamp = now->tv_sec;
1028 bzero(&msg, sizeof(msg));
1029 if ((nso->nso_sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so)) {
1030 msg.msg_name = nso->nso_saddr;
1031 msg.msg_namelen = nso->nso_saddr->sa_len;
1032 }
1033 for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) {
1034 reqlen += mbuf_len(m);
1035 }
1036 lck_mtx_unlock(&nso->nso_lock);
1037 NFS_SOCK_DUMP_MBUF("Sending ping packet", mreq);
1038 error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen);
1039 NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n",
1040 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1041 lck_mtx_lock(&nso->nso_lock);
1042 if (!error && (sentlen != reqlen)) {
1043 error = ETIMEDOUT;
1044 }
1045 }
1046 if (error) {
1047 nso->nso_error = error;
1048 nso->nso_flags |= NSO_DEAD;
1049 return 0;
1050 }
1051
1052 return 1;
1053 }
1054
1055 /*
1056 * nfs_connect_search_socket_found: Take the found socket off the socket search list and assign it as the search's socket.
1057 * Set the nfs socket protocol and version if needed.
1058 */
1059 void
1060 nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso)
1061 {
1062 NFS_SOCK_DBG("nfs connect %s socket %p verified\n",
1063 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1064 if (!nso->nso_version) {
1065 /* If the version isn't set, the default must have worked. */
1066 if (nso->nso_protocol == PMAPPROG) {
1067 nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4;
1068 }
1069 if (nso->nso_protocol == NFS_PROG) {
1070 nso->nso_version = PVER2MAJOR(nmp->nm_max_vers);
1071 }
1072 }
1073 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1074 nss->nss_sockcnt--;
1075 nss->nss_sock = nso;
1076 }
1077
1078 /*
1079 * nfs_connect_search_socket_reap: For each socket in the search list, mark any timed-out socket as dead and remove it from
1080 * the list. Dead sockets are then destroyed.
1081 */
1082 void
1083 nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now)
1084 {
1085 struct nfs_socket *nso, *nsonext;
1086
1087 TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) {
1088 lck_mtx_lock(&nso->nso_lock);
1089 if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) {
1090 /* took too long */
1091 NFS_SOCK_DBG("nfs connect %s socket %p timed out\n",
1092 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1093 nso->nso_error = ETIMEDOUT;
1094 nso->nso_flags |= NSO_DEAD;
1095 }
1096 if (!(nso->nso_flags & NSO_DEAD)) {
1097 lck_mtx_unlock(&nso->nso_lock);
1098 continue;
1099 }
1100 lck_mtx_unlock(&nso->nso_lock);
1101 NFS_SOCK_DBG("nfs connect %s reaping socket %p error = %d flags = %8.8x\n",
1102 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error, nso->nso_flags);
1103 nfs_socket_search_update_error(nss, nso->nso_error);
1104 TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link);
1105 nss->nss_sockcnt--;
1106 nfs_socket_destroy(nso);
1107 /* If there are more sockets to try, force the starting of another socket */
1108 if (nss->nss_addrcnt > 0) {
1109 nss->nss_last = -2;
1110 }
1111 }
1112 }
1113
1114 /*
1115 * nfs_connect_search_check: Check on the status of search and wait for replies if needed.
1116 */
1117 int
1118 nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, struct timeval *now)
1119 {
1120 int error;
1121
1122 /* log a warning if connect is taking a while */
1123 if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) {
1124 printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1125 nss->nss_flags |= NSS_WARNED;
1126 }
1127 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
1128 return EINTR;
1129 }
1130 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
1131 return error;
1132 }
1133
1134 /* If we were successful at sending a ping, wait up to a second for a reply */
1135 if (nss->nss_last >= 0) {
1136 tsleep(nss, PSOCK, "nfs_connect_search_wait", hz);
1137 }
1138
1139 return 0;
1140 }
1141
1142
1143 /*
1144 * Continue the socket search until we have something to report.
1145 */
1146 int
1147 nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss)
1148 {
1149 struct nfs_socket *nso;
1150 struct timeval now;
1151 int error;
1152 int verbose = (nss->nss_flags & NSS_VERBOSE);
1153
1154 loop:
1155 microuptime(&now);
1156 NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec);
1157
1158 /* add a new socket to the socket list if needed and available */
1159 error = nfs_connect_search_new_socket(nmp, nss, &now);
1160 if (error) {
1161 NFS_SOCK_DBG("nfs connect returned %d\n", error);
1162 return error;
1163 }
1164
1165 /* check each active socket on the list and try to push it along */
1166 TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) {
1167 lck_mtx_lock(&nso->nso_lock);
1168
1169 /* If not connected connect it */
1170 if (!(nso->nso_flags & NSO_CONNECTED)) {
1171 if (!nfs_connect_search_socket_connect(nmp, nso, verbose)) {
1172 lck_mtx_unlock(&nso->nso_lock);
1173 continue;
1174 }
1175 }
1176
1177 /* If the socket isn't being pinged and hasn't been verified, ping it. We also handle UDP retransmits here. */
1178 if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) ||
1179 ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) {
1180 if (!nfs_connect_search_ping(nmp, nso, &now)) {
1181 lck_mtx_unlock(&nso->nso_lock);
1182 continue;
1183 }
1184 }
1185
1186 /* Has the socket been verified by the up call routine? */
1187 if (nso->nso_flags & NSO_VERIFIED) {
1188 /* WOOHOO!! This socket looks good! */
1189 nfs_connect_search_socket_found(nmp, nss, nso);
1190 lck_mtx_unlock(&nso->nso_lock);
1191 break;
1192 }
1193 lck_mtx_unlock(&nso->nso_lock);
1194 }
1195
1196 /* Check for timed-out sockets, mark them as dead, and then remove all dead sockets. */
1197 nfs_connect_search_socket_reap(nmp, nss, &now);
1198
1199 /*
1200 * Keep looping if we haven't found a socket yet and we have more
1201 * sockets to (continue to) try.
1202 */
1203 error = 0;
1204 if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) {
1205 error = nfs_connect_search_check(nmp, nss, &now);
1206 if (!error) {
1207 goto loop;
1208 }
1209 }
1210
1211 NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1212 return error;
1213 }
1214
1215 /*
1216 * Initialize a new NFS connection.
1217 *
1218 * Search for a location to connect a socket to and initialize the connection.
1219 *
1220 * An NFS mount may have multiple locations/servers/addresses available.
1221 * We attempt to connect to each one asynchronously and will start
1222 * several sockets in parallel if other locations are slow to answer.
1223 * We'll use the first NFS socket we can successfully set up.
1224 *
1225 * The search may involve contacting the portmapper service first.
1226 *
1227 * A mount's initial connection may require negotiating some parameters such
1228 * as socket type and NFS version.
1229 */
1230
1231 int
1232 nfs_connect(struct nfsmount *nmp, int verbose, int timeo)
1233 {
1234 struct nfs_socket_search nss;
1235 struct nfs_socket *nso, *nsonfs;
1236 struct sockaddr_storage ss;
1237 struct sockaddr *saddr, *oldsaddr;
1238 sock_upcall upcall;
1239 #if CONFIG_NFS4
1240 struct timeval now;
1241 #endif
1242 struct timeval start;
1243 int error, savederror, nfsvers;
1244 int tryv4 = 1;
1245 uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM;
1246 fhandle_t *fh = NULL;
1247 char *path = NULL;
1248 in_port_t port = 0;
1249 int addrtotal = 0;
1250
1251 /* paranoia... check that we have at least one address in the locations */
1252 uint32_t loc, serv;
1253 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
1254 for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) {
1255 addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount;
1256 if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) {
1257 NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n",
1258 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1259 nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name);
1260 }
1261 }
1262 }
1263
1264 if (addrtotal == 0) {
1265 NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n",
1266 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1267 return EINVAL;
1268 } else {
1269 NFS_SOCK_DBG("nfs connect %s has %d addresses\n",
1270 vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal);
1271 }
1272
1273 lck_mtx_lock(&nmp->nm_lock);
1274 nmp->nm_sockflags |= NMSOCK_CONNECTING;
1275 nmp->nm_nss = &nss;
1276 lck_mtx_unlock(&nmp->nm_lock);
1277 microuptime(&start);
1278 savederror = error = 0;
1279
1280 tryagain:
1281 /* initialize socket search state */
1282 bzero(&nss, sizeof(nss));
1283 nss.nss_addrcnt = addrtotal;
1284 nss.nss_error = savederror;
1285 TAILQ_INIT(&nss.nss_socklist);
1286 nss.nss_sotype = sotype;
1287 nss.nss_startloc = nmp->nm_locations.nl_current;
1288 nss.nss_timestamp = start.tv_sec;
1289 nss.nss_timeo = timeo;
1290 if (verbose) {
1291 nss.nss_flags |= NSS_VERBOSE;
1292 }
1293
1294 /* First time connecting, we may need to negotiate some things */
1295 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1296 NFS_SOCK_DBG("so_family = %d\n", nmp->nm_sofamily);
1297 NFS_SOCK_DBG("nfs port = %d local: <%s>\n", nmp->nm_nfsport, nmp->nm_nfs_localport ? nmp->nm_nfs_localport : "");
1298 NFS_SOCK_DBG("mount port = %d local: <%s>\n", nmp->nm_mountport, nmp->nm_mount_localport ? nmp->nm_mount_localport : "");
1299 if (!nmp->nm_vers) {
1300 /* No NFS version specified... */
1301 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1302 #if CONFIG_NFS4
1303 if (PVER2MAJOR(nmp->nm_max_vers) >= NFS_VER4 && tryv4) {
1304 nss.nss_port = NFS_PORT;
1305 nss.nss_protocol = NFS_PROG;
1306 nss.nss_version = 4;
1307 nss.nss_flags |= NSS_FALLBACK2PMAP;
1308 } else {
1309 #endif
1310 /* ...connect to portmapper first if we (may) need any ports. */
1311 nss.nss_port = PMAPPORT;
1312 nss.nss_protocol = PMAPPROG;
1313 nss.nss_version = 0;
1314 #if CONFIG_NFS4
1315 }
1316 #endif
1317 } else {
1318 /* ...connect to NFS port first. */
1319 nss.nss_port = nmp->nm_nfsport;
1320 nss.nss_protocol = NFS_PROG;
1321 nss.nss_version = 0;
1322 }
1323 #if CONFIG_NFS4
1324 } else if (nmp->nm_vers >= NFS_VER4) {
1325 if (tryv4) {
1326 /* For NFSv4, we use the given (or default) port. */
1327 nss.nss_port = nmp->nm_nfsport ? nmp->nm_nfsport : NFS_PORT;
1328 nss.nss_protocol = NFS_PROG;
1329 nss.nss_version = 4;
1330 /*
1331 * set NSS_FALLBACK2PMAP here to pick up any non-standard port
1332 * if no port is specified on the mount;
1333 * Note nm_vers is set so we will only try NFS_VER4.
1334 */
1335 if (!nmp->nm_nfsport) {
1336 nss.nss_flags |= NSS_FALLBACK2PMAP;
1337 }
1338 } else {
1339 nss.nss_port = PMAPPORT;
1340 nss.nss_protocol = PMAPPROG;
1341 nss.nss_version = 0;
1342 }
1343 #endif
1344 } else {
1345 /* For NFSv3/v2... */
1346 if (!nmp->nm_nfsport || (!NM_OMATTR_GIVEN(nmp, FH) && !nmp->nm_mountport)) {
1347 /* ...connect to portmapper first if we need any ports. */
1348 nss.nss_port = PMAPPORT;
1349 nss.nss_protocol = PMAPPROG;
1350 nss.nss_version = 0;
1351 } else {
1352 /* ...connect to NFS port first. */
1353 nss.nss_port = nmp->nm_nfsport;
1354 nss.nss_protocol = NFS_PROG;
1355 nss.nss_version = nmp->nm_vers;
1356 }
1357 }
1358 NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n",
1359 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1360 nss.nss_protocol, nss.nss_version);
1361 } else {
1362 /* we've connected before, just connect to NFS port */
1363 if (!nmp->nm_nfsport) {
1364 /* need to ask portmapper which port that would be */
1365 nss.nss_port = PMAPPORT;
1366 nss.nss_protocol = PMAPPROG;
1367 nss.nss_version = 0;
1368 } else {
1369 nss.nss_port = nmp->nm_nfsport;
1370 nss.nss_protocol = NFS_PROG;
1371 nss.nss_version = nmp->nm_vers;
1372 }
1373 NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n",
1374 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port,
1375 nss.nss_protocol, nss.nss_version);
1376 }
1377
1378 /* Set next location to first valid location. */
1379 /* If start location is invalid, find next location. */
1380 nss.nss_nextloc = nss.nss_startloc;
1381 if ((nss.nss_nextloc.nli_serv >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servcount) ||
1382 (nss.nss_nextloc.nli_addr >= nmp->nm_locations.nl_locations[nss.nss_nextloc.nli_loc]->nl_servers[nss.nss_nextloc.nli_serv]->ns_addrcount)) {
1383 nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc);
1384 if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) {
1385 NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n",
1386 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1387 return ENOENT;
1388 }
1389 }
1390 nss.nss_last = -1;
1391
1392 keepsearching:
1393
1394 error = nfs_connect_search_loop(nmp, &nss);
1395 if (error || !nss.nss_sock) {
1396 /* search failed */
1397 nfs_socket_search_cleanup(&nss);
1398 if (nss.nss_flags & NSS_FALLBACK2PMAP) {
1399 tryv4 = 0;
1400 NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n",
1401 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1402 goto tryagain;
1403 }
1404
1405 if (!error && (nss.nss_sotype == SOCK_STREAM) && !nmp->nm_sotype && (nmp->nm_vers < NFS_VER4)) {
1406 /* Try using UDP */
1407 sotype = SOCK_DGRAM;
1408 savederror = nss.nss_error;
1409 NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n",
1410 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error);
1411 goto tryagain;
1412 }
1413 if (!error) {
1414 error = nss.nss_error ? nss.nss_error : ETIMEDOUT;
1415 }
1416 lck_mtx_lock(&nmp->nm_lock);
1417 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1418 nmp->nm_nss = NULL;
1419 lck_mtx_unlock(&nmp->nm_lock);
1420 if (nss.nss_flags & NSS_WARNED) {
1421 log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n",
1422 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1423 }
1424 if (fh) {
1425 NFS_ZFREE(nfs_fhandle_zone, fh);
1426 }
1427 if (path) {
1428 NFS_ZFREE(ZV_NAMEI, path);
1429 }
1430 NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n",
1431 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
1432 return error;
1433 }
1434
1435 /* try to use nss_sock */
1436 nso = nss.nss_sock;
1437 nss.nss_sock = NULL;
1438
1439 /* We may be speaking to portmap first... to determine port(s). */
1440 if (nso->nso_saddr->sa_family == AF_INET) {
1441 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1442 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1443 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1444 } else if (nso->nso_saddr->sa_family == AF_LOCAL) {
1445 if (nso->nso_protocol == PMAPPROG) {
1446 port = PMAPPORT;
1447 }
1448 }
1449
1450 if (port == PMAPPORT) {
1451 /* Use this portmapper port to get the port #s we need. */
1452 NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n",
1453 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1454
1455 /* remove the connect upcall so nfs_portmap_lookup() can use this socket */
1456 sock_setupcall(nso->nso_so, NULL, NULL);
1457
1458 /* Set up socket address and port for NFS socket. */
1459 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1460
1461 /* If NFS version not set, try nm_max_vers down to nm_min_vers */
1462 nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers);
1463 if (!(port = nmp->nm_nfsport)) {
1464 if (ss.ss_family == AF_INET) {
1465 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
1466 } else if (ss.ss_family == AF_INET6) {
1467 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
1468 } else if (ss.ss_family == AF_LOCAL) {
1469 if (((struct sockaddr_un*)&ss)->sun_path[0] == '/') {
1470 NFS_SOCK_DBG("Looking up NFS socket over %s\n", ((struct sockaddr_un*)&ss)->sun_path);
1471 }
1472 }
1473 for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) {
1474 if (nmp->nm_vers && nmp->nm_vers != nfsvers) {
1475 continue; /* Wrong version */
1476 }
1477 #if CONFIG_NFS4
1478 if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) {
1479 continue; /* NFSv4 does not do UDP */
1480 }
1481 #endif
1482 if (ss.ss_family == AF_LOCAL && nmp->nm_nfs_localport) {
1483 struct sockaddr_un *sun = (struct sockaddr_un *)&ss;
1484 NFS_SOCK_DBG("Using supplied local address %s for NFS_PROG\n", nmp->nm_nfs_localport);
1485 strlcpy(sun->sun_path, nmp->nm_nfs_localport, sizeof(sun->sun_path));
1486 error = 0;
1487 } else {
1488 NFS_SOCK_DBG("Calling Portmap/Rpcbind for NFS_PROG");
1489 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1490 nso->nso_so, NFS_PROG, nfsvers, nso->nso_sotype, timeo);
1491 }
1492 if (!error) {
1493 if (ss.ss_family == AF_INET) {
1494 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1495 } else if (ss.ss_family == AF_INET6) {
1496 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1497 } else if (ss.ss_family == AF_LOCAL) {
1498 port = ((struct sockaddr_un *)&ss)->sun_path[0] ? NFS_PORT : 0;
1499 }
1500 if (!port) {
1501 error = EPROGUNAVAIL;
1502 }
1503 #if CONFIG_NFS4
1504 if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) {
1505 continue; /* We already tried this */
1506 }
1507 #endif
1508 }
1509 if (!error) {
1510 break;
1511 }
1512 }
1513 if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) {
1514 error = EPROGUNAVAIL;
1515 }
1516 if (error) {
1517 nfs_socket_search_update_error(&nss, error);
1518 nfs_socket_destroy(nso);
1519 NFS_SOCK_DBG("Could not lookup NFS socket address for version %d error = %d\n", nfsvers, error);
1520 goto keepsearching;
1521 }
1522 } else if (nmp->nm_nfs_localport) {
1523 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_nfs_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1524 NFS_SOCK_DBG("Using supplied nfs_local_port %s for NFS_PROG\n", nmp->nm_nfs_localport);
1525 }
1526
1527 /* Create NFS protocol socket and add it to the list of sockets. */
1528 /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non-standard port */
1529 if (ss.ss_family == AF_LOCAL) {
1530 NFS_SOCK_DBG("Creating NFS socket for %s port = %d\n", ((struct sockaddr_un*)&ss)->sun_path, port);
1531 }
1532 error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port,
1533 NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs);
1534 if (error) {
1535 nfs_socket_search_update_error(&nss, error);
1536 nfs_socket_destroy(nso);
1537 NFS_SOCK_DBG("Could not create NFS socket: %d\n", error);
1538 goto keepsearching;
1539 }
1540 nsonfs->nso_location = nso->nso_location;
1541 nsonfs->nso_wake = &nss;
1542 error = sock_setupcall(nsonfs->nso_so, nfs_connect_upcall, nsonfs);
1543 if (error) {
1544 nfs_socket_search_update_error(&nss, error);
1545 nfs_socket_destroy(nsonfs);
1546 nfs_socket_destroy(nso);
1547 NFS_SOCK_DBG("Could not nfs_connect_upcall: %d", error);
1548 goto keepsearching;
1549 }
1550 TAILQ_INSERT_TAIL(&nss.nss_socklist, nsonfs, nso_link);
1551 nss.nss_sockcnt++;
1552 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1553 /* Set up socket address and port for MOUNT socket. */
1554 error = 0;
1555 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1556 port = nmp->nm_mountport;
1557 NFS_SOCK_DBG("mount port = %d\n", port);
1558 if (ss.ss_family == AF_INET) {
1559 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1560 } else if (ss.ss_family == AF_INET6) {
1561 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1562 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1563 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1564 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1565 }
1566 if (!port) {
1567 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1568 /* If NFS version is unknown, optimistically choose for NFSv3. */
1569 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1570 int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
1571 NFS_SOCK_DBG("Looking up mount port with socket %p\n", nso->nso_so);
1572 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1573 nso->nso_so, RPCPROG_MNT, mntvers, mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
1574 }
1575 if (!error) {
1576 if (ss.ss_family == AF_INET) {
1577 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1578 } else if (ss.ss_family == AF_INET6) {
1579 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1580 } else if (ss.ss_family == AF_LOCAL) {
1581 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
1582 }
1583 if (!port) {
1584 error = EPROGUNAVAIL;
1585 }
1586 }
1587 /* create sockaddr for MOUNT */
1588 if (!error) {
1589 MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO);
1590 }
1591 if (!error && !nsonfs->nso_saddr2) {
1592 error = ENOMEM;
1593 }
1594 if (!error) {
1595 bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len);
1596 }
1597 if (error) {
1598 NFS_SOCK_DBG("Could not create mount socket address: %d\n", error);
1599 lck_mtx_lock(&nsonfs->nso_lock);
1600 nsonfs->nso_error = error;
1601 nsonfs->nso_flags |= NSO_DEAD;
1602 lck_mtx_unlock(&nsonfs->nso_lock);
1603 }
1604 }
1605 NFS_SOCK_DBG("Destroying socket %p so %p\n", nso, nso->nso_so);
1606 nfs_socket_destroy(nso);
1607 goto keepsearching;
1608 }
1609
1610 /* nso is an NFS socket */
1611 NFS_SOCK_DBG("nfs connect %s got NFS socket %p\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso);
1612
1613 /* If NFS version wasn't specified, it was determined during the connect. */
1614 nfsvers = nmp->nm_vers ? nmp->nm_vers : (int)nso->nso_version;
1615
1616 /* Perform MOUNT call for initial NFSv2/v3 connection/mount. */
1617 if ((nfsvers < NFS_VER4) && !(nmp->nm_sockflags & NMSOCK_HASCONNECTED) && !NM_OMATTR_GIVEN(nmp, FH)) {
1618 error = 0;
1619 saddr = nso->nso_saddr2;
1620 if (!saddr) {
1621 /* Need sockaddr for MOUNT port */
1622 NFS_SOCK_DBG("Getting mount address mountport = %d, mount_localport = %s\n", nmp->nm_mountport, nmp->nm_mount_localport);
1623 bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len);
1624 port = nmp->nm_mountport;
1625 if (ss.ss_family == AF_INET) {
1626 ((struct sockaddr_in*)&ss)->sin_port = htons(port);
1627 } else if (ss.ss_family == AF_INET6) {
1628 ((struct sockaddr_in6*)&ss)->sin6_port = htons(port);
1629 } else if (ss.ss_family == AF_LOCAL && nmp->nm_mount_localport) {
1630 NFS_SOCK_DBG("Setting mount address to %s port = %d\n", nmp->nm_mount_localport, nmp->nm_mountport);
1631 strlcpy(((struct sockaddr_un*)&ss)->sun_path, nmp->nm_mount_localport, sizeof(((struct sockaddr_un*)&ss)->sun_path));
1632 }
1633 if (!port) {
1634 /* Get port/sockaddr for MOUNT version corresponding to NFS version. */
1635 int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
1636 int so_type = NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : nso->nso_sotype;
1637 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss,
1638 NULL, RPCPROG_MNT, mntvers, so_type, timeo);
1639 if (ss.ss_family == AF_INET) {
1640 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
1641 } else if (ss.ss_family == AF_INET6) {
1642 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
1643 }
1644 }
1645 if (!error) {
1646 if (port) {
1647 saddr = (struct sockaddr*)&ss;
1648 } else {
1649 error = EPROGUNAVAIL;
1650 }
1651 }
1652 }
1653 if (!error) {
1654 error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo);
1655 if (error) {
1656 nfs_socket_search_update_error(&nss, error);
1657 nfs_socket_destroy(nso);
1658 return error;
1659 }
1660 }
1661 if (saddr) {
1662 fh = zalloc(nfs_fhandle_zone);
1663 }
1664 if (saddr && fh) {
1665 path = zalloc(ZV_NAMEI);
1666 }
1667 if (!saddr || !fh || !path) {
1668 if (!error) {
1669 error = ENOMEM;
1670 }
1671 if (fh) {
1672 NFS_ZFREE(nfs_fhandle_zone, fh);
1673 }
1674 if (path) {
1675 NFS_ZFREE(ZV_NAMEI, path);
1676 }
1677 nfs_socket_search_update_error(&nss, error);
1678 nfs_socket_destroy(nso);
1679 goto keepsearching;
1680 }
1681 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1);
1682 error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers,
1683 path, vfs_context_current(), timeo, fh, &nmp->nm_servsec);
1684 NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n",
1685 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1686 if (!error) {
1687 /* Make sure we can agree on a security flavor. */
1688 int o, s; /* indices into mount option and server security flavor lists */
1689 int found = 0;
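
/*
 * Negotiation order (see the two branches below): honor the client's
 * sec= list first, taking the first flavor the server also offers;
 * otherwise take the server's list, preferring RPCAUTH_SYS over
 * RPCAUTH_NONE.
 */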
1690
1691 if ((nfsvers == NFS_VER3) && !nmp->nm_servsec.count) {
1692 /* Some servers return an empty list to indicate RPCAUTH_SYS? */
1693 nmp->nm_servsec.count = 1;
1694 nmp->nm_servsec.flavors[0] = RPCAUTH_SYS;
1695 }
1696 if (nmp->nm_sec.count) {
1697 /* Choose the first flavor in our list that the server supports. */
1698 if (!nmp->nm_servsec.count) {
1699 /* we don't know what the server supports, just use our first choice */
1700 nmp->nm_auth = nmp->nm_sec.flavors[0];
1701 found = 1;
1702 }
1703 for (o = 0; !found && (o < nmp->nm_sec.count); o++) {
1704 for (s = 0; !found && (s < nmp->nm_servsec.count); s++) {
1705 if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) {
1706 nmp->nm_auth = nmp->nm_sec.flavors[o];
1707 found = 1;
1708 }
1709 }
1710 }
1711 } else {
1712 /* Choose the first one we support from the server's list. */
1713 if (!nmp->nm_servsec.count) {
1714 nmp->nm_auth = RPCAUTH_SYS;
1715 found = 1;
1716 }
1717 for (s = 0; s < nmp->nm_servsec.count; s++) {
1718 switch (nmp->nm_servsec.flavors[s]) {
1719 case RPCAUTH_SYS:
1720 /* prefer RPCAUTH_SYS to RPCAUTH_NONE */
1721 if (found && (nmp->nm_auth == RPCAUTH_NONE)) {
1722 found = 0;
1723 }
1724 OS_FALLTHROUGH;
1725 case RPCAUTH_NONE:
1726 case RPCAUTH_KRB5:
1727 case RPCAUTH_KRB5I:
1728 case RPCAUTH_KRB5P:
1729 if (!found) {
1730 nmp->nm_auth = nmp->nm_servsec.flavors[s];
1731 found = 1;
1732 }
1733 break;
1734 }
1735 }
1736 }
1737 error = !found ? EAUTH : 0;
1738 }
1739 NFS_ZFREE(ZV_NAMEI, path);
1740 if (error) {
1741 nfs_socket_search_update_error(&nss, error);
1742 NFS_ZFREE(nfs_fhandle_zone, fh);
1743 nfs_socket_destroy(nso);
1744 goto keepsearching;
1745 }
1746 if (nmp->nm_fh) {
1747 NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
1748 }
1749 nmp->nm_fh = fh;
1750 fh = NULL;
1751 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
1752 }
1753
1754 /* put the real upcall in place */
1755 upcall = (nso->nso_sotype == SOCK_STREAM) ? nfs_tcp_rcv : nfs_udp_rcv;
1756 error = sock_setupcall(nso->nso_so, upcall, nmp);
1757 if (error) {
1758 nfs_socket_search_update_error(&nss, error);
1759 nfs_socket_destroy(nso);
1760 goto keepsearching;
1761 }
1762
1763 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1764 /* set mntfromname to this location */
1765 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
1766 nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location,
1767 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1768 sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0);
1769 }
1770 /* some negotiated values need to remain unchanged for the life of the mount */
1771 if (!nmp->nm_sotype) {
1772 nmp->nm_sotype = nso->nso_sotype;
1773 }
1774 if (!nmp->nm_vers) {
1775 nmp->nm_vers = nfsvers;
1776 #if CONFIG_NFS4
1777 /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */
1778 if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1779 if (nso->nso_saddr->sa_family == AF_INET) {
1780 port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port);
1781 } else if (nso->nso_saddr->sa_family == AF_INET6) {
1782 port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port);
1783 } else {
1784 port = 0;
1785 }
1786 if (port == NFS_PORT) {
1787 nmp->nm_nfsport = NFS_PORT;
1788 }
1789 }
1790 #endif
1791 }
1792 #if CONFIG_NFS4
1793 /* do some version-specific pre-mount set up */
1794 if (nmp->nm_vers >= NFS_VER4) {
1795 microtime(&now);
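/* pack the mount time into one 64-bit value: seconds in the high 32 bits, microseconds in the low 32 */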
1796 nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec;
1797 if (!NMFLAG(nmp, NOCALLBACK)) {
1798 nfs4_mount_callback_setup(nmp);
1799 }
1800 }
1801 #endif
1802 }
1803
1804 /* Initialize NFS socket state variables */
1805 lck_mtx_lock(&nmp->nm_lock);
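/* Smoothed RTT and deviation estimates are kept per timer class in fixed point (scaled by 8, hence the <<3) so the retransmit timer can do integer smoothing. */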
1806 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
1807 nmp->nm_srtt[3] = (NFS_TIMEO << 3);
1808 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
1809 nmp->nm_sdrtt[3] = 0;
1810 if (nso->nso_sotype == SOCK_DGRAM) {
1811 nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
1812 nmp->nm_sent = 0;
1813 } else if (nso->nso_sotype == SOCK_STREAM) {
1814 nmp->nm_timeouts = 0;
1815 }
1816 nmp->nm_sockflags &= ~NMSOCK_CONNECTING;
1817 nmp->nm_sockflags |= NMSOCK_SETUP;
1818 /* move the socket to the mount structure */
1819 nmp->nm_nso = nso;
1820 oldsaddr = nmp->nm_saddr;
1821 nmp->nm_saddr = nso->nso_saddr;
1822 lck_mtx_unlock(&nmp->nm_lock);
1823 error = nfs_connect_setup(nmp);
1824 lck_mtx_lock(&nmp->nm_lock);
1825 nmp->nm_sockflags &= ~NMSOCK_SETUP;
1826 if (!error) {
1827 nmp->nm_sockflags |= NMSOCK_READY;
1828 wakeup(&nmp->nm_sockflags);
1829 }
1830 if (error) {
1831 NFS_SOCK_DBG("nfs connect %s socket %p setup failed %d\n",
1832 vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error);
1833 nfs_socket_search_update_error(&nss, error);
1834 nmp->nm_saddr = oldsaddr;
1835 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1836 /* undo settings made prior to setup */
1837 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) {
1838 nmp->nm_sotype = 0;
1839 }
1840 #if CONFIG_NFS4
1841 if (nmp->nm_vers >= NFS_VER4) {
1842 if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) {
1843 nmp->nm_nfsport = 0;
1844 }
1845 if (nmp->nm_cbid) {
1846 nfs4_mount_callback_shutdown(nmp);
1847 }
1848 if (IS_VALID_CRED(nmp->nm_mcred)) {
1849 kauth_cred_unref(&nmp->nm_mcred);
1850 }
1851 bzero(&nmp->nm_un, sizeof(nmp->nm_un));
1852 }
1853 #endif
1854 nmp->nm_vers = 0;
1855 }
1856 lck_mtx_unlock(&nmp->nm_lock);
1857 nmp->nm_nso = NULL;
1858 nfs_socket_destroy(nso);
1859 goto keepsearching;
1860 }
1861
1862 /* update current location */
1863 if ((nmp->nm_locations.nl_current.nli_flags & NLI_VALID) &&
1864 (nmp->nm_locations.nl_current.nli_serv != nso->nso_location.nli_serv)) {
1865 /* server has changed, we should initiate failover/recovery */
1866 // XXX
1867 }
1868 nmp->nm_locations.nl_current = nso->nso_location;
1869 nmp->nm_locations.nl_current.nli_flags |= NLI_VALID;
1870
1871 if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) {
1872 /* We have now successfully connected... make a note of it. */
1873 nmp->nm_sockflags |= NMSOCK_HASCONNECTED;
1874 }
1875
1876 lck_mtx_unlock(&nmp->nm_lock);
1877 if (oldsaddr) {
1878 FREE(oldsaddr, M_SONAME);
1879 }
1880
1881 if (nss.nss_flags & NSS_WARNED) {
1882 log(LOG_INFO, "nfs_connect: socket connect completed for %s\n",
1883 vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1884 }
1885
1886 nmp->nm_nss = NULL;
1887 nfs_socket_search_cleanup(&nss);
1888 if (fh) {
1889 NFS_ZFREE(nfs_fhandle_zone, fh);
1890 }
1891 if (path) {
1892 NFS_ZFREE(ZV_NAMEI, path);
1893 }
1894 NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
1895 return 0;
1896 }
1897
1898
1899 /* setup & confirm socket connection is functional */
1900 int
1901 nfs_connect_setup(
1902 #if !CONFIG_NFS4
1903 __unused
1904 #endif
1905 struct nfsmount *nmp)
1906 {
1907 int error = 0;
1908 #if CONFIG_NFS4
1909 if (nmp->nm_vers >= NFS_VER4) {
1910 if (nmp->nm_state & NFSSTA_CLIENTID) {
1911 /* first, try to renew our current state */
1912 error = nfs4_renew(nmp, R_SETUP);
1913 if ((error == NFSERR_ADMIN_REVOKED) ||
1914 (error == NFSERR_CB_PATH_DOWN) ||
1915 (error == NFSERR_EXPIRED) ||
1916 (error == NFSERR_LEASE_MOVED) ||
1917 (error == NFSERR_STALE_CLIENTID)) {
1918 lck_mtx_lock(&nmp->nm_lock);
1919 nfs_need_recover(nmp, error);
1920 lck_mtx_unlock(&nmp->nm_lock);
1921 }
1922 }
1923 error = nfs4_setclientid(nmp);
1924 }
1925 #endif
1926 return error;
1927 }
1928
1929 /*
1930 * NFS socket reconnect routine:
1931 * Called when a connection is broken.
1932 * - disconnect the old socket
1933 * - nfs_connect() again
1934 * - set R_MUSTRESEND for all outstanding requests on mount point
1935 * If this fails the mount point is DEAD!
1936 */
1937 int
1938 nfs_reconnect(struct nfsmount *nmp)
1939 {
1940 struct nfsreq *rq;
1941 struct timeval now;
1942 thread_t thd = current_thread();
1943 int error, wentdown = 0, verbose = 1;
1944 time_t lastmsg;
1945 int timeo;
1946
1947 microuptime(&now);
1948 lastmsg = now.tv_sec - (nmp->nm_tprintf_delay - nmp->nm_tprintf_initial_delay);
1949
1950 nfs_disconnect(nmp);
1951
1952
1953 lck_mtx_lock(&nmp->nm_lock);
1954 timeo = nfs_is_squishy(nmp) ? 8 : 30;
1955 lck_mtx_unlock(&nmp->nm_lock);
1956
1957 while ((error = nfs_connect(nmp, verbose, timeo))) {
1958 verbose = 0;
1959 nfs_disconnect(nmp);
1960 if ((error == EINTR) || (error == ERESTART)) {
1961 return EINTR;
1962 }
1963 if (error == EIO) {
1964 return EIO;
1965 }
1966 microuptime(&now);
1967 if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) {
1968 lastmsg = now.tv_sec;
1969 nfs_down(nmp, thd, error, NFSSTA_TIMEO, "cannot connect", 0);
1970 wentdown = 1;
1971 }
1972 lck_mtx_lock(&nmp->nm_lock);
1973 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
1974 /* we're not yet completely mounted and */
1975 /* we can't reconnect, so we fail */
1976 lck_mtx_unlock(&nmp->nm_lock);
1977 NFS_SOCK_DBG("Not mounted returning %d\n", error);
1978 return error;
1979 }
1980
1981 if (nfs_mount_check_dead_timeout(nmp)) {
1982 nfs_mount_make_zombie(nmp);
1983 lck_mtx_unlock(&nmp->nm_lock);
1984 return ENXIO;
1985 }
1986
1987 if ((error = nfs_sigintr(nmp, NULL, thd, 1))) {
1988 lck_mtx_unlock(&nmp->nm_lock);
1989 return error;
1990 }
1991 lck_mtx_unlock(&nmp->nm_lock);
1992 tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz);
1993 if ((error = nfs_sigintr(nmp, NULL, thd, 0))) {
1994 return error;
1995 }
1996 }
1997
1998 if (wentdown) {
1999 nfs_up(nmp, thd, NFSSTA_TIMEO, "connected");
2000 }
2001
2002 /*
2003 * Loop through outstanding request list and mark all requests
2004 * as needing a resend. (Though nfs_need_reconnect() probably
2005 * marked them all already.)
2006 */
2007 lck_mtx_lock(&nfs_request_mutex);
2008 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2009 if (rq->r_nmp == nmp) {
2010 lck_mtx_lock(&rq->r_mtx);
2011 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2012 rq->r_flags |= R_MUSTRESEND;
2013 rq->r_rtt = -1;
2014 wakeup(rq);
2015 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2016 nfs_asyncio_resend(rq);
2017 }
2018 }
2019 lck_mtx_unlock(&rq->r_mtx);
2020 }
2021 }
2022 lck_mtx_unlock(&nfs_request_mutex);
2023 return 0;
2024 }
2025
2026 /*
2027 * NFS disconnect. Clean up and unlink.
2028 */
2029 void
2030 nfs_disconnect(struct nfsmount *nmp)
2031 {
2032 struct nfs_socket *nso;
2033
2034 lck_mtx_lock(&nmp->nm_lock);
2035 tryagain:
2036 if (nmp->nm_nso) {
2037 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2038 if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */
2039 nmp->nm_state |= NFSSTA_WANTSND;
2040 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts);
2041 goto tryagain;
2042 }
2043 if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */
2044 msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts);
2045 goto tryagain;
2046 }
2047 nmp->nm_sockflags |= NMSOCK_DISCONNECTING;
2048 nmp->nm_sockflags &= ~NMSOCK_READY;
2049 nso = nmp->nm_nso;
2050 nmp->nm_nso = NULL;
2051 if (nso->nso_saddr == nmp->nm_saddr) {
2052 nso->nso_saddr = NULL;
2053 }
2054 lck_mtx_unlock(&nmp->nm_lock);
2055 nfs_socket_destroy(nso);
2056 lck_mtx_lock(&nmp->nm_lock);
2057 nmp->nm_sockflags &= ~NMSOCK_DISCONNECTING;
2058 lck_mtx_unlock(&nmp->nm_lock);
2059 } else {
2060 lck_mtx_unlock(&nmp->nm_lock);
2061 }
2062 }
2063
2064 /*
2065 * mark an NFS mount as needing a reconnect/resends.
2066 */
2067 void
2068 nfs_need_reconnect(struct nfsmount *nmp)
2069 {
2070 struct nfsreq *rq;
2071
2072 lck_mtx_lock(&nmp->nm_lock);
2073 nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP);
2074 lck_mtx_unlock(&nmp->nm_lock);
2075
2076 /*
2077 * Loop through outstanding request list and
2078 * mark all requests as needing a resend.
2079 */
2080 lck_mtx_lock(&nfs_request_mutex);
2081 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
2082 if (rq->r_nmp == nmp) {
2083 lck_mtx_lock(&rq->r_mtx);
2084 if (!rq->r_error && !rq->r_nmrep.nmc_mhead && !(rq->r_flags & R_MUSTRESEND)) {
2085 rq->r_flags |= R_MUSTRESEND;
2086 rq->r_rtt = -1;
2087 wakeup(rq);
2088 if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
2089 nfs_asyncio_resend(rq);
2090 }
2091 }
2092 lck_mtx_unlock(&rq->r_mtx);
2093 }
2094 }
2095 lck_mtx_unlock(&nfs_request_mutex);
2096 }
2097
2098
2099 /*
2100 * thread to handle miscellaneous async NFS socket work (reconnects/resends)
2101 */
2102 void
2103 nfs_mount_sock_thread(void *arg, __unused wait_result_t wr)
2104 {
2105 struct nfsmount *nmp = arg;
2106 struct timespec ts = { .tv_sec = 30, .tv_nsec = 0 };
2107 thread_t thd = current_thread();
2108 struct nfsreq *req;
2109 struct timeval now;
2110 int error, dofinish;
2111 nfsnode_t np;
2112 int do_reconnect_sleep = 0;
2113
2114 lck_mtx_lock(&nmp->nm_lock);
2115 while (!(nmp->nm_sockflags & NMSOCK_READY) ||
2116 !TAILQ_EMPTY(&nmp->nm_resendq) ||
2117 !LIST_EMPTY(&nmp->nm_monlist) ||
2118 nmp->nm_deadto_start ||
2119 (nmp->nm_state & NFSSTA_RECOVER) ||
2120 ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) {
2121 if (nmp->nm_sockflags & NMSOCK_UNMOUNT) {
2122 break;
2123 }
2124 /* do reconnect, if necessary */
2125 if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2126 if (nmp->nm_reconnect_start <= 0) {
2127 microuptime(&now);
2128 nmp->nm_reconnect_start = now.tv_sec;
2129 }
2130 lck_mtx_unlock(&nmp->nm_lock);
2131 NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2132 /*
2133 * XXX We don't want to call reconnect again right away if the previous
2134 * attempt returned an error without blocking. That has caused us to spam
2135 * NULL procs to machines in the past.
2136 */
2137 if (do_reconnect_sleep) {
2138 tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz);
2139 }
2140 error = nfs_reconnect(nmp);
2141 if (error) {
2142 int lvl = 7;
2143 if (error == EIO || error == EINTR) {
2144 lvl = (do_reconnect_sleep++ % 600) ? 7 : 0;
2145 }
2146 NFS_DBG(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n",
2147 vfs_statfs(nmp->nm_mountp)->f_mntfromname, error);
2148 } else {
2149 nmp->nm_reconnect_start = 0;
2150 do_reconnect_sleep = 0;
2151 }
2152 lck_mtx_lock(&nmp->nm_lock);
2153 }
2154 if ((nmp->nm_sockflags & NMSOCK_READY) &&
2155 (nmp->nm_state & NFSSTA_RECOVER) &&
2156 !(nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2157 !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2158 /* perform state recovery */
2159 lck_mtx_unlock(&nmp->nm_lock);
2160 nfs_recover(nmp);
2161 lck_mtx_lock(&nmp->nm_lock);
2162 }
2163 #if CONFIG_NFS4
2164 /* handle NFSv4 delegation returns */
2165 while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
2166 (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) &&
2167 ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) {
2168 lck_mtx_unlock(&nmp->nm_lock);
2169 nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred);
2170 lck_mtx_lock(&nmp->nm_lock);
2171 }
2172 #endif
2173 /* do resends, if necessary/possible */
2174 while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) ||
2175 (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) &&
2176 ((req = TAILQ_FIRST(&nmp->nm_resendq)))) {
2177 if (req->r_resendtime) {
2178 microuptime(&now);
2179 }
2180 while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) {
2181 req = TAILQ_NEXT(req, r_rchain);
2182 }
2183 if (!req) {
2184 break;
2185 }
2186 /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */
2187 lck_mtx_unlock(&nmp->nm_lock);
2188 lck_mtx_lock(&req->r_mtx);
2189 lck_mtx_lock(&nmp->nm_lock);
2190 if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
2191 lck_mtx_unlock(&req->r_mtx);
2192 continue;
2193 }
2194 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
2195 req->r_flags &= ~R_RESENDQ;
2196 req->r_rchain.tqe_next = NFSREQNOLIST;
2197 lck_mtx_unlock(&nmp->nm_lock);
2198 /* Note that we have a reference on the request that was taken by nfs_asyncio_resend */
2199 if (req->r_error || req->r_nmrep.nmc_mhead) {
2200 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2201 wakeup(req);
2202 lck_mtx_unlock(&req->r_mtx);
2203 if (dofinish) {
2204 nfs_asyncio_finish(req);
2205 }
2206 nfs_request_rele(req);
2207 lck_mtx_lock(&nmp->nm_lock);
2208 continue;
2209 }
2210 if ((req->r_flags & R_RESTART) || nfs_request_using_gss(req)) {
2211 req->r_flags &= ~R_RESTART;
2212 req->r_resendtime = 0;
2213 lck_mtx_unlock(&req->r_mtx);
2214 /* async RPCs on GSS mounts need to be rebuilt and resent. */
2215 nfs_reqdequeue(req);
2216 #if CONFIG_NFS_GSS
2217 if (nfs_request_using_gss(req)) {
2218 nfs_gss_clnt_rpcdone(req);
2219 error = nfs_gss_clnt_args_restore(req);
2220 if (error == ENEEDAUTH) {
2221 req->r_xid = 0;
2222 }
2223 }
2224 #endif /* CONFIG_NFS_GSS */
2225 NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n",
2226 nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid,
2227 req->r_flags, req->r_rtt);
2228 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2229 if (!error) {
2230 error = nfs_request_add_header(req);
2231 }
2232 if (!error) {
2233 error = nfs_request_send(req, 0);
2234 }
2235 lck_mtx_lock(&req->r_mtx);
2236 if (error) {
2237 req->r_error = error;
2238 }
2239 wakeup(req);
2240 dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2241 lck_mtx_unlock(&req->r_mtx);
2242 if (dofinish) {
2243 nfs_asyncio_finish(req);
2244 }
2245 nfs_request_rele(req);
2246 lck_mtx_lock(&nmp->nm_lock);
2247 error = 0;
2248 continue;
2249 }
2250 NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n",
2251 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
2252 error = nfs_sigintr(nmp, req, req->r_thread, 0);
2253 if (!error) {
2254 req->r_flags |= R_SENDING;
2255 lck_mtx_unlock(&req->r_mtx);
2256 error = nfs_send(req, 0);
2257 lck_mtx_lock(&req->r_mtx);
2258 if (!error) {
2259 wakeup(req);
2260 lck_mtx_unlock(&req->r_mtx);
2261 nfs_request_rele(req);
2262 lck_mtx_lock(&nmp->nm_lock);
2263 continue;
2264 }
2265 }
2266 req->r_error = error;
2267 wakeup(req);
2268 dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT);
2269 lck_mtx_unlock(&req->r_mtx);
2270 if (dofinish) {
2271 nfs_asyncio_finish(req);
2272 }
2273 nfs_request_rele(req);
2274 lck_mtx_lock(&nmp->nm_lock);
2275 }
2276 if (nfs_mount_check_dead_timeout(nmp)) {
2277 nfs_mount_make_zombie(nmp);
2278 break;
2279 }
2280
2281 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
2282 break;
2283 }
2284 /* check monitored nodes, if necessary/possible */
2285 if (!LIST_EMPTY(&nmp->nm_monlist)) {
2286 nmp->nm_state |= NFSSTA_MONITOR_SCAN;
2287 LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) {
2288 if (!(nmp->nm_sockflags & NMSOCK_READY) ||
2289 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2290 break;
2291 }
2292 np->n_mflag |= NMMONSCANINPROG;
2293 lck_mtx_unlock(&nmp->nm_lock);
2294 error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR));
2295 if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */
2296 nfs_data_update_size(np, 0);
2297 }
2298 lck_mtx_lock(&nmp->nm_lock);
2299 np->n_mflag &= ~NMMONSCANINPROG;
2300 if (np->n_mflag & NMMONSCANWANT) {
2301 np->n_mflag &= ~NMMONSCANWANT;
2302 wakeup(&np->n_mflag);
2303 }
2304 if (error || !(nmp->nm_sockflags & NMSOCK_READY) ||
2305 (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) {
2306 break;
2307 }
2308 }
2309 nmp->nm_state &= ~NFSSTA_MONITOR_SCAN;
2310 if (nmp->nm_state & NFSSTA_UNMOUNTING) {
2311 wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */
2312 }
2313 }
2314 if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) {
2315 if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) ||
2316 (nmp->nm_state & NFSSTA_RECOVER)) {
2317 ts.tv_sec = 1;
2318 } else {
2319 ts.tv_sec = 5;
2320 }
2321 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts);
2322 }
2323 }
2324
2325 /* If we're unmounting, send the unmount RPC, if requested/appropriate. */
2326 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) &&
2327 (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) &&
2328 (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
2329 lck_mtx_unlock(&nmp->nm_lock);
2330 nfs3_umount_rpc(nmp, vfs_context_kernel(),
2331 (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2);
2332 lck_mtx_lock(&nmp->nm_lock);
2333 }
2334
2335 if (nmp->nm_sockthd == thd) {
2336 nmp->nm_sockthd = NULL;
2337 }
2338 lck_mtx_unlock(&nmp->nm_lock);
2339 wakeup(&nmp->nm_sockthd);
2340 thread_terminate(thd);
2341 }
2342
2343 /* start or wake a mount's socket thread */
2344 void
2345 nfs_mount_sock_thread_wake(struct nfsmount *nmp)
2346 {
2347 if (nmp->nm_sockthd) {
2348 wakeup(&nmp->nm_sockthd);
2349 } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) {
2350 thread_deallocate(nmp->nm_sockthd);
2351 }
2352 }
2353
2354 /*
2355 * Check if we should mark the mount dead because the
2356 * unresponsive mount has reached the dead timeout.
2357 * (must be called with nmp locked)
2358 */
2359 int
2360 nfs_mount_check_dead_timeout(struct nfsmount *nmp)
2361 {
2362 struct timeval now;
2363
2364 if (nmp->nm_state & NFSSTA_DEAD) {
2365 return 1;
2366 }
2367 if (nmp->nm_deadto_start == 0) {
2368 return 0;
2369 }
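/* nfs_is_squishy() is called here for its side effect: it re-evaluates squishiness and updates nm_curdeadtimeout */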
2370 nfs_is_squishy(nmp);
2371 if (nmp->nm_curdeadtimeout <= 0) {
2372 return 0;
2373 }
2374 microuptime(&now);
2375 if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) {
2376 return 0;
2377 }
2378 return 1;
2379 }
2380
2381 /*
2382 * Call nfs_mount_zombie to remove most of the
2383 * nfs state for the mount, and then ask to be forcibly unmounted.
2384 *
2385 * Assumes the nfs mount structure lock nm_lock is held.
2386 */
2387
2388 void
2389 nfs_mount_make_zombie(struct nfsmount *nmp)
2390 {
2391 fsid_t fsid;
2392
2393 if (!nmp) {
2394 return;
2395 }
2396
2397 if (nmp->nm_state & NFSSTA_DEAD) {
2398 return;
2399 }
2400
2401 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
2402 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
2403 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
2404 lck_mtx_unlock(&nmp->nm_lock);
2405 nfs_mount_zombie(nmp, NFSSTA_DEAD);
2406 vfs_event_signal(&fsid, VQ_DEAD, 0);
2407 lck_mtx_lock(&nmp->nm_lock);
2408 }
2409
2410
2411 /*
2412 * NFS callback channel socket state
2413 */
2414 struct nfs_callback_socket {
2415 TAILQ_ENTRY(nfs_callback_socket) ncbs_link;
2416 socket_t ncbs_so; /* the socket */
2417 struct sockaddr_storage ncbs_saddr; /* socket address */
2418 struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */
2419 time_t ncbs_stamp; /* last accessed at */
2420 uint32_t ncbs_flags; /* see below */
2421 };
2422 #define NCBSOCK_UPCALL 0x0001
2423 #define NCBSOCK_UPCALLWANT 0x0002
2424 #define NCBSOCK_DEAD 0x0004
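/* NCBSOCK_UPCALL: receive upcall in progress; NCBSOCK_UPCALLWANT: a thread is waiting for the upcall to finish; NCBSOCK_DEAD: socket awaits reaping by the cleanup timer */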
2425
2426 #if CONFIG_NFS4
2427 /*
2428 * NFS callback channel state
2429 *
2430 * One listening socket for accepting socket connections from servers and
2431 * a list of connected sockets to handle callback requests on.
2432 * Mounts registered with the callback channel are assigned IDs and
2433 * put on a list so that the callback request handling code can match
2434 * the requests up with mounts.
2435 */
2436 socket_t nfs4_cb_so = NULL;
2437 socket_t nfs4_cb_so6 = NULL;
2438 in_port_t nfs4_cb_port = 0;
2439 in_port_t nfs4_cb_port6 = 0;
2440 uint32_t nfs4_cb_id = 0;
2441 uint32_t nfs4_cb_so_usecount = 0;
2442 TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks;
2443 TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts;
2444
2445 int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t);
2446
2447 /*
2448 * Set up the callback channel for the NFS mount.
2449 *
2450 * Initializes the callback channel socket state and
2451 * assigns a callback ID to the mount.
2452 */
2453 void
2454 nfs4_mount_callback_setup(struct nfsmount *nmp)
2455 {
2456 struct sockaddr_in sin;
2457 struct sockaddr_in6 sin6;
2458 socket_t so = NULL;
2459 socket_t so6 = NULL;
2460 struct timeval timeo;
2461 int error, on = 1;
2462 in_port_t port;
2463
2464 lck_mtx_lock(&nfs_global_mutex);
2465 if (nfs4_cb_id == 0) {
2466 TAILQ_INIT(&nfs4_cb_mounts);
2467 TAILQ_INIT(&nfs4_cb_socks);
2468 nfs4_cb_id++;
2469 }
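/* a callback ID of 0 means "no callback channel", so skip 0 if the counter wraps */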
2470 nmp->nm_cbid = nfs4_cb_id++;
2471 if (nmp->nm_cbid == 0) {
2472 nmp->nm_cbid = nfs4_cb_id++;
2473 }
2474 nfs4_cb_so_usecount++;
2475 TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink);
2476
2477 if (nfs4_cb_so) {
2478 lck_mtx_unlock(&nfs_global_mutex);
2479 return;
2480 }
2481
2482 /* IPv4 */
2483 error = sock_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so);
2484 if (error) {
2485 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv4 socket\n", error);
2486 goto fail;
2487 }
2488 so = nfs4_cb_so;
2489
2490 if (NFS_PORT_INVALID(nfs_callback_port)) {
2491 error = EINVAL;
2492 log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port);
2493 goto fail;
2494 }
2495
2496 sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2497 sin.sin_len = sizeof(struct sockaddr_in);
2498 sin.sin_family = AF_INET;
2499 sin.sin_addr.s_addr = htonl(INADDR_ANY);
2500 sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */
2501 error = sock_bind(so, (struct sockaddr *)&sin);
2502 if (error) {
2503 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error);
2504 goto fail;
2505 }
2506 error = sock_getsockname(so, (struct sockaddr *)&sin, sin.sin_len);
2507 if (error) {
2508 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv4 socket port\n", error);
2509 goto fail;
2510 }
2511 nfs4_cb_port = ntohs(sin.sin_port);
2512
2513 error = sock_listen(so, 32);
2514 if (error) {
2515 log(LOG_INFO, "nfs callback setup: error %d on IPv4 listen\n", error);
2516 goto fail;
2517 }
2518
2519 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2520 timeo.tv_usec = 0;
2521 timeo.tv_sec = 60;
2522 error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2523 if (error) {
2524 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error);
2525 }
2526 error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2527 if (error) {
2528 log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error);
2529 }
2530 sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2531 sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2532 sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2533 error = 0;
2534
2535 /* IPv6 */
2536 error = sock_socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, nfs4_cb_accept, NULL, &nfs4_cb_so6);
2537 if (error) {
2538 log(LOG_INFO, "nfs callback setup: error %d creating listening IPv6 socket\n", error);
2539 goto fail;
2540 }
2541 so6 = nfs4_cb_so6;
2542
2543 sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2544 sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
2545 /* try to use specified port or same port as IPv4 */
2546 port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port;
2547 ipv6_bind_again:
2548 sin6.sin6_len = sizeof(struct sockaddr_in6);
2549 sin6.sin6_family = AF_INET6;
2550 sin6.sin6_addr = in6addr_any;
2551 sin6.sin6_port = htons(port);
2552 error = sock_bind(so6, (struct sockaddr *)&sin6);
2553 if (error) {
2554 if (port != nfs_callback_port) {
2555 /* if we simply tried to match the IPv4 port, then try any port */
2556 port = 0;
2557 goto ipv6_bind_again;
2558 }
2559 log(LOG_INFO, "nfs callback setup: error %d binding listening IPv6 socket\n", error);
2560 goto fail;
2561 }
2562 error = sock_getsockname(so6, (struct sockaddr *)&sin6, sin6.sin6_len);
2563 if (error) {
2564 log(LOG_INFO, "nfs callback setup: error %d getting listening IPv6 socket port\n", error);
2565 goto fail;
2566 }
2567 nfs4_cb_port6 = ntohs(sin6.sin6_port);
2568
2569 error = sock_listen(so6, 32);
2570 if (error) {
2571 log(LOG_INFO, "nfs callback setup: error %d on IPv6 listen\n", error);
2572 goto fail;
2573 }
2574
2575 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2576 timeo.tv_usec = 0;
2577 timeo.tv_sec = 60;
2578 error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2579 if (error) {
2580 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error);
2581 }
2582 error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2583 if (error) {
2584 log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error);
2585 }
2586 sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2587 sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2588 sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2589 error = 0;
2590
2591 fail:
2592 if (error) {
2593 nfs4_cb_so = nfs4_cb_so6 = NULL;
2594 lck_mtx_unlock(&nfs_global_mutex);
2595 if (so) {
2596 sock_shutdown(so, SHUT_RDWR);
2597 sock_close(so);
2598 }
2599 if (so6) {
2600 sock_shutdown(so6, SHUT_RDWR);
2601 sock_close(so6);
2602 }
2603 } else {
2604 lck_mtx_unlock(&nfs_global_mutex);
2605 }
2606 }
2607
2608 /*
2609 * Shut down the callback channel for the NFS mount.
2610 *
2611 * Clears the mount's callback ID and releases the mount's
2612 * reference on the callback socket. Dropping the last reference
2613 * also shuts down the callback socket(s).
2614 */
2615 void
2616 nfs4_mount_callback_shutdown(struct nfsmount *nmp)
2617 {
2618 struct nfs_callback_socket *ncbsp;
2619 socket_t so, so6;
2620 struct nfs4_cb_sock_list cb_socks;
2621 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2622
2623 lck_mtx_lock(&nfs_global_mutex);
2624 if (nmp->nm_cbid == 0) {
2625 lck_mtx_unlock(&nfs_global_mutex);
2626 return;
2627 }
2628 TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink);
2629 /* wait for any callbacks in progress to complete */
2630 while (nmp->nm_cbrefs) {
2631 msleep(&nmp->nm_cbrefs, &nfs_global_mutex, PSOCK, "cbshutwait", &ts);
2632 }
2633 nmp->nm_cbid = 0;
2634 if (--nfs4_cb_so_usecount) {
2635 lck_mtx_unlock(&nfs_global_mutex);
2636 return;
2637 }
2638 so = nfs4_cb_so;
2639 so6 = nfs4_cb_so6;
2640 nfs4_cb_so = nfs4_cb_so6 = NULL;
2641 TAILQ_INIT(&cb_socks);
2642 TAILQ_CONCAT(&cb_socks, &nfs4_cb_socks, ncbs_link);
2643 lck_mtx_unlock(&nfs_global_mutex);
2644 if (so) {
2645 sock_shutdown(so, SHUT_RDWR);
2646 sock_close(so);
2647 }
2648 if (so6) {
2649 sock_shutdown(so6, SHUT_RDWR);
2650 sock_close(so6);
2651 }
2652 while ((ncbsp = TAILQ_FIRST(&cb_socks))) {
2653 TAILQ_REMOVE(&cb_socks, ncbsp, ncbs_link);
2654 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2655 sock_close(ncbsp->ncbs_so);
2656 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2657 FREE(ncbsp, M_TEMP);
2658 }
2659 }
2660
2661 /*
2662 * Check periodically for stale/unused nfs callback sockets
2663 */
2664 #define NFS4_CB_TIMER_PERIOD 30
2665 #define NFS4_CB_IDLE_MAX 300
2666 void
2667 nfs4_callback_timer(__unused void *param0, __unused void *param1)
2668 {
2669 struct nfs_callback_socket *ncbsp, *nextncbsp;
2670 struct timeval now;
2671
2672 loop:
2673 lck_mtx_lock(&nfs_global_mutex);
2674 if (TAILQ_EMPTY(&nfs4_cb_socks)) {
2675 nfs4_callback_timer_on = 0;
2676 lck_mtx_unlock(&nfs_global_mutex);
2677 return;
2678 }
2679 microuptime(&now);
2680 TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) {
2681 if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) &&
2682 (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) {
2683 continue;
2684 }
2685 TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link);
2686 lck_mtx_unlock(&nfs_global_mutex);
2687 sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR);
2688 sock_close(ncbsp->ncbs_so);
2689 nfs_rpc_record_state_cleanup(&ncbsp->ncbs_rrs);
2690 FREE(ncbsp, M_TEMP);
2691 goto loop;
2692 }
2693 nfs4_callback_timer_on = 1;
2694 nfs_interval_timer_start(nfs4_callback_timer_call,
2695 NFS4_CB_TIMER_PERIOD * 1000);
2696 lck_mtx_unlock(&nfs_global_mutex);
2697 }
2698
2699 /*
2700 * Accept a new callback socket.
2701 */
2702 void
2703 nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag)
2704 {
2705 socket_t newso = NULL;
2706 struct nfs_callback_socket *ncbsp;
2707 struct nfsmount *nmp;
2708 struct timeval timeo, now;
2709 int error, on = 1, ip;
2710
2711 if (so == nfs4_cb_so) {
2712 ip = 4;
2713 } else if (so == nfs4_cb_so6) {
2714 ip = 6;
2715 } else {
2716 return;
2717 }
2718
2719 /* allocate/initialize a new nfs_callback_socket */
2720 MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK);
2721 if (!ncbsp) {
2722 log(LOG_ERR, "nfs callback accept: no memory for new socket\n");
2723 return;
2724 }
2725 bzero(ncbsp, sizeof(*ncbsp));
2726 ncbsp->ncbs_saddr.ss_len = (ip == 4) ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
2727 nfs_rpc_record_state_init(&ncbsp->ncbs_rrs);
2728
2729 /* accept a new socket */
2730 error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr,
2731 ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT,
2732 nfs4_cb_rcv, ncbsp, &newso);
2733 if (error) {
2734 log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip);
2735 FREE(ncbsp, M_TEMP);
2736 return;
2737 }
2738
2739 /* set up the new socket */
2740 /* receive timeout shouldn't matter. If timeout on send, we'll want to drop the socket */
2741 timeo.tv_usec = 0;
2742 timeo.tv_sec = 60;
2743 error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
2744 if (error) {
2745 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip);
2746 }
2747 error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
2748 if (error) {
2749 log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip);
2750 }
2751 sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
2752 sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
2753 sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on));
2754 sock_setsockopt(newso, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on));
2755
2756 ncbsp->ncbs_so = newso;
2757 microuptime(&now);
2758 ncbsp->ncbs_stamp = now.tv_sec;
2759
2760 lck_mtx_lock(&nfs_global_mutex);
2761
2762 /* add it to the list */
2763 TAILQ_INSERT_HEAD(&nfs4_cb_socks, ncbsp, ncbs_link);
2764
2765 /* verify it's from a host we have mounted */
2766 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2767 /* check if socket's source address matches this mount's server address */
2768 if (!nmp->nm_saddr) {
2769 continue;
2770 }
2771 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2772 break;
2773 }
2774 }
2775 if (!nmp) { /* we don't want this socket, mark it dead */
2776 ncbsp->ncbs_flags |= NCBSOCK_DEAD;
2777 }
2778
2779 /* make sure the callback socket cleanup timer is running */
2780 /* (shorten the timer if we've got a socket we don't want) */
2781 if (!nfs4_callback_timer_on) {
2782 nfs4_callback_timer_on = 1;
2783 nfs_interval_timer_start(nfs4_callback_timer_call,
2784 !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000));
2785 } else if (!nmp && (nfs4_callback_timer_on < 2)) {
2786 nfs4_callback_timer_on = 2;
2787 thread_call_cancel(nfs4_callback_timer_call);
2788 nfs_interval_timer_start(nfs4_callback_timer_call, 500);
2789 }
2790
2791 lck_mtx_unlock(&nfs_global_mutex);
2792 }
2793
2794 /*
2795 * Receive mbufs from callback sockets into RPC records and process each record.
2796 * Detect when the connection has been closed and mark the socket dead for cleanup.
2797 */
2798 void
2799 nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag)
2800 {
2801 struct nfs_callback_socket *ncbsp = arg;
2802 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
2803 struct timeval now;
2804 mbuf_t m;
2805 int error = 0, recv = 1;
2806
2807 lck_mtx_lock(&nfs_global_mutex);
2808 while (ncbsp->ncbs_flags & NCBSOCK_UPCALL) {
2809 /* wait if upcall is already in progress */
2810 ncbsp->ncbs_flags |= NCBSOCK_UPCALLWANT;
2811 msleep(ncbsp, &nfs_global_mutex, PSOCK, "cbupcall", &ts);
2812 }
2813 ncbsp->ncbs_flags |= NCBSOCK_UPCALL;
2814 lck_mtx_unlock(&nfs_global_mutex);
2815
2816 /* loop while we make error-free progress */
2817 while (!error && recv) {
2818 error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m);
2819 if (m) { /* handle the request */
2820 error = nfs4_cb_handler(ncbsp, m);
2821 }
2822 }
2823
2824 /* note: no error and no data indicates server closed its end */
2825 if ((error != EWOULDBLOCK) && (error || !recv)) {
2826 /*
2827 * Socket is either being closed or should be.
2828 * We can't close the socket in the context of the upcall.
2829 * So we mark it as dead and leave it for the cleanup timer to reap.
2830 */
2831 ncbsp->ncbs_stamp = 0;
2832 ncbsp->ncbs_flags |= NCBSOCK_DEAD;
2833 } else {
2834 microuptime(&now);
2835 ncbsp->ncbs_stamp = now.tv_sec;
2836 }
2837
2838 lck_mtx_lock(&nfs_global_mutex);
2839 ncbsp->ncbs_flags &= ~NCBSOCK_UPCALL;
2840 lck_mtx_unlock(&nfs_global_mutex);
2841 wakeup(ncbsp);
2842 }
2843
2844 /*
2845 * Handle an NFS callback channel request.
2846 */
2847 int
2848 nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq)
2849 {
2850 socket_t so = ncbsp->ncbs_so;
2851 struct nfsm_chain nmreq, nmrep;
2852 mbuf_t mhead = NULL, mrest = NULL, m;
2853 struct msghdr msg;
2854 struct nfsmount *nmp;
2855 fhandle_t *fh;
2856 nfsnode_t np;
2857 nfs_stateid stateid;
2858 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes;
2859 uint32_t val, xid, procnum, taglen, cbid, numops, op, status;
2860 uint32_t auth_type, auth_len;
2861 uint32_t numres, *pnumres;
2862 int error = 0, replen, len;
2863 size_t sentlen = 0;
2864
2865 xid = numops = op = status = procnum = taglen = cbid = 0;
2866 fh = zalloc(nfs_fhandle_zone);
2867
2868 nfsm_chain_dissect_init(error, &nmreq, mreq);
2869 nfsm_chain_get_32(error, &nmreq, xid); // RPC XID
2870 nfsm_chain_get_32(error, &nmreq, val); // RPC Call
2871 nfsm_assert(error, (val == RPC_CALL), EBADRPC);
2872 nfsm_chain_get_32(error, &nmreq, val); // RPC Version
2873 nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH);
2874 nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number
2875 nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL);
2876 nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number
2877 nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH);
2878 nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number
2879 nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL);
2880
2881 /* Handle authentication */
2882 /* XXX just ignore auth for now - handling kerberos may be tricky */
2883 nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor
2884 nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length
2885 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2886 if (!error && (auth_len > 0)) {
2887 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2888 }
2889 nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
2890 nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length
2891 nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC);
2892 if (!error && (auth_len > 0)) {
2893 nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len));
2894 }
2895 if (error) {
2896 status = error;
2897 error = 0;
2898 goto nfsmout;
2899 }
2900
2901 switch (procnum) {
2902 case NFSPROC4_CB_NULL:
2903 status = NFSERR_RETVOID;
2904 break;
2905 case NFSPROC4_CB_COMPOUND:
2906 /* tag, minorversion, cb ident, numops, op array */
2907 nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */
2908 nfsm_assert(error, (taglen <= NFS4_OPAQUE_LIMIT), EBADRPC);
2909
2910 /* start building the body of the response */
2911 nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED);
2912 nfsm_chain_init(&nmrep, mrest);
2913
2914 /* copy tag from request to response */
2915 nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */
2916 for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) {
2917 nfsm_chain_get_32(error, &nmreq, val);
2918 nfsm_chain_add_32(error, &nmrep, val);
2919 }
2920
2921 /* insert number of results placeholder */
2922 numres = 0;
2923 nfsm_chain_add_32(error, &nmrep, numres);
2924 pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED);
2925
2926 nfsm_chain_get_32(error, &nmreq, val); /* minorversion */
2927 nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH);
2928 nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */
2929 nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */
2930 if (error) {
2931 if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) {
2932 status = error;
2933 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
2934 status = NFSERR_RESOURCE;
2935 } else {
2936 status = NFSERR_SERVERFAULT;
2937 }
2938 error = 0;
2939 nfsm_chain_null(&nmrep);
2940 goto nfsmout;
2941 }
2942 /* match the callback ID to a registered mount */
2943 lck_mtx_lock(&nfs_global_mutex);
2944 TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) {
2945 if (nmp->nm_cbid != cbid) {
2946 continue;
2947 }
2948 /* verify socket's source address matches this mount's server address */
2949 if (!nmp->nm_saddr) {
2950 continue;
2951 }
2952 if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) {
2953 break;
2954 }
2955 }
2956 /* mark the NFS mount as busy */
2957 if (nmp) {
2958 nmp->nm_cbrefs++;
2959 }
2960 lck_mtx_unlock(&nfs_global_mutex);
2961 if (!nmp) {
2962 /* if no mount match, just drop socket. */
2963 error = EPERM;
2964 nfsm_chain_null(&nmrep);
2965 goto out;
2966 }
2967
2968 /* process ops, adding results to mrest */
2969 while (numops > 0) {
2970 numops--;
2971 nfsm_chain_get_32(error, &nmreq, op);
2972 if (error) {
2973 break;
2974 }
2975 switch (op) {
2976 case NFS_OP_CB_GETATTR:
2977 // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS)
2978 np = NULL;
2979 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
2980 bmlen = NFS_ATTR_BITMAP_LEN;
2981 nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen);
2982 if (error) {
2983 status = error;
2984 error = 0;
2985 numops = 0; /* don't process any more ops */
2986 } else {
2987 /* find the node for the file handle */
2988 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
2989 if (error || !np) {
2990 status = NFSERR_BADHANDLE;
2991 error = 0;
2992 np = NULL;
2993 numops = 0; /* don't process any more ops */
2994 }
2995 }
2996 nfsm_chain_add_32(error, &nmrep, op);
2997 nfsm_chain_add_32(error, &nmrep, status);
2998 if (!error && (status == EBADRPC)) {
2999 error = status;
3000 }
3001 if (np) {
3002 /* only allow returning size, change, and mtime attrs */
3003 NFS_CLEAR_ATTRIBUTES(&rbitmap);
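/* attrbytes tallies the XDR size of the attributes that follow: change and size are 64 bits (two words) each, mtime is three words (64-bit seconds + 32-bit nanoseconds) */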
3004 attrbytes = 0;
3005 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
3006 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_CHANGE);
3007 attrbytes += 2 * NFSX_UNSIGNED;
3008 }
3009 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
3010 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_SIZE);
3011 attrbytes += 2 * NFSX_UNSIGNED;
3012 }
3013 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
3014 NFS_BITMAP_SET(&rbitmap, NFS_FATTR_TIME_MODIFY);
3015 attrbytes += 3 * NFSX_UNSIGNED;
3016 }
3017 nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN);
3018 nfsm_chain_add_32(error, &nmrep, attrbytes);
3019 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) {
3020 nfsm_chain_add_64(error, &nmrep,
3021 np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0));
3022 }
3023 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) {
3024 nfsm_chain_add_64(error, &nmrep, np->n_size);
3025 }
3026 if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) {
3027 nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]);
3028 nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]);
3029 }
3030 nfs_node_unlock(np);
3031 vnode_put(NFSTOV(np));
3032 np = NULL;
3033 }
3034 /*
3035 * If we hit an error building the reply, we can't easily back up.
3036 * So we'll just update the status and hope the server ignores the
3037 * extra garbage.
3038 */
3039 break;
3040 case NFS_OP_CB_RECALL:
3041 // (STATEID, TRUNCATE, FH) -> (STATUS)
3042 np = NULL;
3043 nfsm_chain_get_stateid(error, &nmreq, &stateid);
3044 nfsm_chain_get_32(error, &nmreq, truncate);
3045 nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh);
3046 if (error) {
3047 status = error;
3048 error = 0;
3049 numops = 0; /* don't process any more ops */
3050 } else {
3051 /* find the node for the file handle */
3052 error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
3053 if (error || !np) {
3054 status = NFSERR_BADHANDLE;
3055 error = 0;
3056 np = NULL;
3057 numops = 0; /* don't process any more ops */
3058 } else if (!(np->n_openflags & N_DELEG_MASK) ||
3059 bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) {
3060 /* delegation stateid state doesn't match */
3061 status = NFSERR_BAD_STATEID;
3062 numops = 0; /* don't process any more ops */
3063 }
3064 if (!status) { /* add node to recall queue, and wake socket thread */
3065 nfs4_delegation_return_enqueue(np);
3066 }
3067 if (np) {
3068 nfs_node_unlock(np);
3069 vnode_put(NFSTOV(np));
3070 }
3071 }
3072 nfsm_chain_add_32(error, &nmrep, op);
3073 nfsm_chain_add_32(error, &nmrep, status);
3074 if (!error && (status == EBADRPC)) {
3075 error = status;
3076 }
3077 break;
3078 case NFS_OP_CB_ILLEGAL:
3079 default:
3080 nfsm_chain_add_32(error, &nmrep, NFS_OP_CB_ILLEGAL);
3081 status = NFSERR_OP_ILLEGAL;
3082 nfsm_chain_add_32(error, &nmrep, status);
3083 numops = 0; /* don't process any more ops */
3084 break;
3085 }
3086 numres++;
3087 }
3088
3089 if (!status && error) {
3090 if (error == EBADRPC) {
3091 status = error;
3092 } else if ((error == ENOBUFS) || (error == ENOMEM)) {
3093 status = NFSERR_RESOURCE;
3094 } else {
3095 status = NFSERR_SERVERFAULT;
3096 }
3097 error = 0;
3098 }
3099
3100 /* Now, set the numres field */
3101 *pnumres = txdr_unsigned(numres);
3102 nfsm_chain_build_done(error, &nmrep);
3103 nfsm_chain_null(&nmrep);
3104
3105 /* drop the callback reference on the mount */
3106 lck_mtx_lock(&nfs_global_mutex);
3107 nmp->nm_cbrefs--;
3108 if (!nmp->nm_cbid) {
3109 wakeup(&nmp->nm_cbrefs);
3110 }
3111 lck_mtx_unlock(&nfs_global_mutex);
3112 break;
3113 }
3114
3115 nfsmout:
3116 if (status == EBADRPC) {
3117 OSAddAtomic64(1, &nfsstats.rpcinvalid);
3118 }
3119
3120 /* build reply header */
3121 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead);
3122 nfsm_chain_init(&nmrep, mhead);
3123 nfsm_chain_add_32(error, &nmrep, 0); /* insert space for an RPC record mark */
3124 nfsm_chain_add_32(error, &nmrep, xid);
3125 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
3126 if ((status == ERPCMISMATCH) || (status & NFSERR_AUTHERR)) {
3127 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
3128 if (status & NFSERR_AUTHERR) {
3129 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
3130 nfsm_chain_add_32(error, &nmrep, (status & ~NFSERR_AUTHERR));
3131 } else {
3132 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
3133 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3134 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
3135 }
3136 } else {
3137 /* reply status */
3138 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
3139 /* XXX RPCAUTH_NULL verifier */
3140 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
3141 nfsm_chain_add_32(error, &nmrep, 0);
3142 /* accepted status */
3143 switch (status) {
3144 case EPROGUNAVAIL:
3145 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
3146 break;
3147 case EPROGMISMATCH:
3148 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
3149 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3150 nfsm_chain_add_32(error, &nmrep, NFS4_CALLBACK_PROG_VERSION);
3151 break;
3152 case EPROCUNAVAIL:
3153 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
3154 break;
3155 case EBADRPC:
3156 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
3157 break;
3158 default:
3159 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
3160 if (status != NFSERR_RETVOID) {
3161 nfsm_chain_add_32(error, &nmrep, status);
3162 }
3163 break;
3164 }
3165 }
3166 nfsm_chain_build_done(error, &nmrep);
3167 if (error) {
3168 nfsm_chain_null(&nmrep);
3169 goto out;
3170 }
3171 error = mbuf_setnext(nmrep.nmc_mcur, mrest);
3172 if (error) {
3173 printf("nfs cb: mbuf_setnext failed %d\n", error);
3174 goto out;
3175 }
3176 mrest = NULL;
3177 /* Calculate the size of the reply */
3178 replen = 0;
3179 for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) {
3180 replen += mbuf_len(m);
3181 }
3182 mbuf_pkthdr_setlen(mhead, replen);
3183 error = mbuf_pkthdr_setrcvif(mhead, NULL);
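/* RPC record mark: the high bit flags this as the final (here, only) fragment; the length excludes the 4-byte mark itself */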
3184 nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000);
3185 nfsm_chain_null(&nmrep);
3186
3187 /* send the reply */
3188 bzero(&msg, sizeof(msg));
3189 error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen);
3190 mhead = NULL;
3191 if (!error && ((int)sentlen != replen)) {
3192 error = EWOULDBLOCK;
3193 }
3194 if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */
3195 error = ETIMEDOUT;
3196 }
3197 out:
3198 if (error) {
3199 nfsm_chain_cleanup(&nmrep);
3200 }
3201 if (mhead) {
3202 mbuf_freem(mhead);
3203 }
3204 if (mrest) {
3205 mbuf_freem(mrest);
3206 }
3207 if (mreq) {
3208 mbuf_freem(mreq);
3209 }
3210 NFS_ZFREE(nfs_fhandle_zone, fh);
3211 return error;
3212 }
3213 #endif /* CONFIG_NFS4 */
3214
3215 /*
3216 * Initialize an nfs_rpc_record_state structure.
3217 */
3218 void
3219 nfs_rpc_record_state_init(struct nfs_rpc_record_state *nrrsp)
3220 {
3221 bzero(nrrsp, sizeof(*nrrsp));
3222 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3223 }
3224
3225 /*
3226 * Clean up an nfs_rpc_record_state structure.
3227 */
3228 void
3229 nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *nrrsp)
3230 {
3231 if (nrrsp->nrrs_m) {
3232 mbuf_freem(nrrsp->nrrs_m);
3233 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3234 }
3235 }
3236
3237 /*
3238 * Read the next (marked) RPC record from the socket.
3239 *
3240 * *recvp returns whether any data was received.
3241 * *mp returns the next complete RPC record, if one has been assembled.
3242 */
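/*
 * TCP transports use RPC record marking (RFC 1831): each fragment is
 * preceded by a 4-byte big-endian word whose high bit flags the last
 * fragment of a record and whose low 31 bits give the fragment length.
 * A sketch of the encoding this routine parses:
 *
 *	marker = htonl((lastfrag ? 0x80000000 : 0) | fraglen);
 */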
3243 int
3244 nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, int *recvp, mbuf_t *mp)
3245 {
3246 struct iovec aio;
3247 struct msghdr msg;
3248 size_t rcvlen;
3249 int error = 0;
3250 mbuf_t m;
3251
3252 *recvp = 0;
3253 *mp = NULL;
3254
3255 /* read the TCP RPC record marker */
3256 while (!error && nrrsp->nrrs_markerleft) {
3257 aio.iov_base = ((char*)&nrrsp->nrrs_fragleft +
3258 sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft);
3259 aio.iov_len = nrrsp->nrrs_markerleft;
3260 bzero(&msg, sizeof(msg));
3261 msg.msg_iov = &aio;
3262 msg.msg_iovlen = 1;
3263 error = sock_receive(so, &msg, flags, &rcvlen);
3264 if (error || !rcvlen) {
3265 break;
3266 }
3267 *recvp = 1;
3268 nrrsp->nrrs_markerleft -= rcvlen;
3269 if (nrrsp->nrrs_markerleft) {
3270 continue;
3271 }
3272 /* record marker complete */
3273 nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft);
3274 if (nrrsp->nrrs_fragleft & 0x80000000) {
3275 nrrsp->nrrs_lastfrag = 1;
3276 nrrsp->nrrs_fragleft &= ~0x80000000;
3277 }
3278 nrrsp->nrrs_reclen += nrrsp->nrrs_fragleft;
3279 if (nrrsp->nrrs_reclen > NFS_MAXPACKET) {
3280 /* This is SERIOUS! We are out of sync with the sender. */
3281 log(LOG_ERR, "impossible RPC record length (%d) on callback", nrrsp->nrrs_reclen);
3282 error = EFBIG;
3283 }
3284 }
3285
3286 /* read the TCP RPC record fragment */
3287 while (!error && !nrrsp->nrrs_markerleft && nrrsp->nrrs_fragleft) {
3288 m = NULL;
3289 rcvlen = nrrsp->nrrs_fragleft;
3290 error = sock_receivembuf(so, NULL, &m, flags, &rcvlen);
3291 if (error || !rcvlen || !m) {
3292 break;
3293 }
3294 *recvp = 1;
3295 /* append mbufs to list */
3296 nrrsp->nrrs_fragleft -= rcvlen;
3297 if (!nrrsp->nrrs_m) {
3298 nrrsp->nrrs_m = m;
3299 } else {
3300 error = mbuf_setnext(nrrsp->nrrs_mlast, m);
3301 if (error) {
3302 printf("nfs tcp rcv: mbuf_setnext failed %d\n", error);
3303 mbuf_freem(m);
3304 break;
3305 }
3306 }
3307 while (mbuf_next(m)) {
3308 m = mbuf_next(m);
3309 }
3310 nrrsp->nrrs_mlast = m;
3311 }
3312
3313 /* done reading fragment? */
3314 if (!error && !nrrsp->nrrs_markerleft && !nrrsp->nrrs_fragleft) {
3315 /* reset socket fragment parsing state */
3316 nrrsp->nrrs_markerleft = sizeof(nrrsp->nrrs_fragleft);
3317 if (nrrsp->nrrs_lastfrag) {
3318 /* RPC record complete */
3319 *mp = nrrsp->nrrs_m;
3320 /* reset socket record parsing state */
3321 nrrsp->nrrs_reclen = 0;
3322 nrrsp->nrrs_m = nrrsp->nrrs_mlast = NULL;
3323 nrrsp->nrrs_lastfrag = 0;
3324 }
3325 }
3326
3327 return error;
3328 }
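
/*
 * Sketch of the intended calling pattern (cf. nfs4_cb_rcv above): keep
 * calling while error-free progress is made; a non-NULL *mp hands the
 * caller ownership of one complete record. handle_record() here is a
 * hypothetical consumer.
 *
 *	int recv = 1, error = 0;
 *	mbuf_t m = NULL;
 *	while (!error && recv) {
 *		error = nfs_rpc_record_read(so, &rrs, MSG_DONTWAIT, &recv, &m);
 *		if (m)
 *			error = handle_record(m);
 *	}
 */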
3329
3330
3331
3332 /*
3333 * The NFS client send routine.
3334 *
3335 * Send the given NFS request out the mount's socket.
3336 * Holds nfs_sndlock() for the duration of this call.
3337 *
3338 * - check for request termination (sigintr)
3339 * - wait for reconnect, if necessary
3340 * - UDP: check the congestion window
3341 * - make a copy of the request to send
3342 * - UDP: update the congestion window
3343 * - send the request
3344 *
3345 * If sent successfully, R_MUSTRESEND and R_RESENDERR are cleared.
3346 * rexmit count is also updated if this isn't the first send.
3347 *
3348 * If the send is not successful, make sure R_MUSTRESEND is set.
3349 * If this wasn't the first transmit, set R_RESENDERR.
3350 * Also, undo any UDP congestion window changes made.
3351 *
3352 * If the error appears to indicate that the socket should
3353 * be reconnected, mark the socket for reconnection.
3354 *
3355 * Only return errors when the request should be aborted.
3356 */
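/*
 * Besides first transmits queued by nfs_request_send(), this routine
 * also handles retransmits driven from nfs_wait_reply() and from the
 * resend-queue handling in nfs_request_async().
 */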
3357 int
3358 nfs_send(struct nfsreq *req, int wait)
3359 {
3360 struct nfsmount *nmp;
3361 struct nfs_socket *nso;
3362 int error, error2, sotype, rexmit, slpflag = 0, needrecon;
3363 struct msghdr msg;
3364 struct sockaddr *sendnam;
3365 mbuf_t mreqcopy;
3366 size_t sentlen = 0;
3367 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3368
3369 again:
3370 error = nfs_sndlock(req);
3371 if (error) {
3372 lck_mtx_lock(&req->r_mtx);
3373 req->r_error = error;
3374 req->r_flags &= ~R_SENDING;
3375 lck_mtx_unlock(&req->r_mtx);
3376 return error;
3377 }
3378
3379 error = nfs_sigintr(req->r_nmp, req, NULL, 0);
3380 if (error) {
3381 nfs_sndunlock(req);
3382 lck_mtx_lock(&req->r_mtx);
3383 req->r_error = error;
3384 req->r_flags &= ~R_SENDING;
3385 lck_mtx_unlock(&req->r_mtx);
3386 return error;
3387 }
3388 nmp = req->r_nmp;
3389 sotype = nmp->nm_sotype;
3390
3391 /*
3392 * If it's a setup RPC but we're not in SETUP... must need reconnect.
3393 * If it's a recovery RPC but the socket's not ready... must need reconnect.
3394 */
3395 if (((req->r_flags & R_SETUP) && !(nmp->nm_sockflags & NMSOCK_SETUP)) ||
3396 ((req->r_flags & R_RECOVER) && !(nmp->nm_sockflags & NMSOCK_READY))) {
3397 error = ETIMEDOUT;
3398 nfs_sndunlock(req);
3399 lck_mtx_lock(&req->r_mtx);
3400 req->r_error = error;
3401 req->r_flags &= ~R_SENDING;
3402 lck_mtx_unlock(&req->r_mtx);
3403 return error;
3404 }
3405
3406 /* If the socket needs reconnection, do that now. */
3407 /* wait until socket is ready - unless this request is part of setup */
3408 lck_mtx_lock(&nmp->nm_lock);
3409 if (!(nmp->nm_sockflags & NMSOCK_READY) &&
3410 !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) {
3411 if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) {
3412 slpflag |= PCATCH;
3413 }
3414 lck_mtx_unlock(&nmp->nm_lock);
3415 nfs_sndunlock(req);
3416 if (!wait) {
3417 lck_mtx_lock(&req->r_mtx);
3418 req->r_flags &= ~R_SENDING;
3419 req->r_flags |= R_MUSTRESEND;
3420 req->r_rtt = 0;
3421 lck_mtx_unlock(&req->r_mtx);
3422 return 0;
3423 }
3424 NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid);
3425 lck_mtx_lock(&req->r_mtx);
3426 req->r_flags &= ~R_MUSTRESEND;
3427 req->r_rtt = 0;
3428 lck_mtx_unlock(&req->r_mtx);
3429 lck_mtx_lock(&nmp->nm_lock);
3430 while (!(nmp->nm_sockflags & NMSOCK_READY)) {
3431 /* don't bother waiting if the socket thread won't be reconnecting it */
3432 if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) {
3433 error = EIO;
3434 break;
3435 }
3436 if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (nmp->nm_reconnect_start > 0)) {
3437 struct timeval now;
3438 microuptime(&now);
3439 if ((now.tv_sec - nmp->nm_reconnect_start) >= 8) {
3440 /* soft mount in reconnect for a while... terminate ASAP */
3441 OSAddAtomic64(1, &nfsstats.rpctimeouts);
3442 req->r_flags |= R_SOFTTERM;
3443 req->r_error = error = ETIMEDOUT;
3444 break;
3445 }
3446 }
3447 /* make sure socket thread is running, then wait */
3448 nfs_mount_sock_thread_wake(nmp);
3449 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
3450 break;
3451 }
3452 msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
3453 slpflag = 0;
3454 }
3455 lck_mtx_unlock(&nmp->nm_lock);
3456 if (error) {
3457 lck_mtx_lock(&req->r_mtx);
3458 req->r_error = error;
3459 req->r_flags &= ~R_SENDING;
3460 lck_mtx_unlock(&req->r_mtx);
3461 return error;
3462 }
3463 goto again;
3464 }
3465 nso = nmp->nm_nso;
3466 /* note that we're using the mount's socket to do the send */
3467 nmp->nm_state |= NFSSTA_SENDING; /* will be cleared by nfs_sndunlock() */
3468 lck_mtx_unlock(&nmp->nm_lock);
3469 if (!nso) {
3470 nfs_sndunlock(req);
3471 lck_mtx_lock(&req->r_mtx);
3472 req->r_flags &= ~R_SENDING;
3473 req->r_flags |= R_MUSTRESEND;
3474 req->r_rtt = 0;
3475 lck_mtx_unlock(&req->r_mtx);
3476 return 0;
3477 }
3478
3479 lck_mtx_lock(&req->r_mtx);
3480 rexmit = (req->r_flags & R_SENT);
3481
3482 if (sotype == SOCK_DGRAM) {
3483 lck_mtx_lock(&nmp->nm_lock);
3484 if (!(req->r_flags & R_CWND) && (nmp->nm_sent >= nmp->nm_cwnd)) {
3485 /* if we can't send this out yet, wait on the cwnd queue */
3486 slpflag = (NMFLAG(nmp, INTR) && req->r_thread) ? PCATCH : 0;
3487 lck_mtx_unlock(&nmp->nm_lock);
3488 nfs_sndunlock(req);
3489 req->r_flags &= ~R_SENDING;
3490 req->r_flags |= R_MUSTRESEND;
3491 lck_mtx_unlock(&req->r_mtx);
3492 if (!wait) {
3493 req->r_rtt = 0;
3494 return 0;
3495 }
3496 lck_mtx_lock(&nmp->nm_lock);
3497 while (nmp->nm_sent >= nmp->nm_cwnd) {
3498 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) {
3499 break;
3500 }
3501 TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain);
3502 msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts);
3503 slpflag = 0;
3504 if ((req->r_cchain.tqe_next != NFSREQNOLIST)) {
3505 TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
3506 req->r_cchain.tqe_next = NFSREQNOLIST;
3507 }
3508 }
3509 lck_mtx_unlock(&nmp->nm_lock);
3510 goto again;
3511 }
3512 /*
3513 * We update these *before* the send to avoid racing
3514 * against others who may be looking to send requests.
3515 */
3516 if (!rexmit) {
3517 /* first transmit */
3518 req->r_flags |= R_CWND;
3519 nmp->nm_sent += NFS_CWNDSCALE;
3520 } else {
3521 /*
3522 * When retransmitting, turn timing off
3523 * and divide congestion window by 2.
3524 */
3525 req->r_flags &= ~R_TIMING;
3526 nmp->nm_cwnd >>= 1;
3527 if (nmp->nm_cwnd < NFS_CWNDSCALE) {
3528 nmp->nm_cwnd = NFS_CWNDSCALE;
3529 }
3530 }
3531 lck_mtx_unlock(&nmp->nm_lock);
3532 }
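/*
 * The halving above, together with the additive increase performed in
 * nfs_request_match_reply() when replies arrive, gives the classic AIMD
 * behavior: cut the window in half (floored at one request) on a
 * retransmit, grow it by about one request per round trip otherwise.
 */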
3533
3534 req->r_flags &= ~R_MUSTRESEND;
3535 lck_mtx_unlock(&req->r_mtx);
3536
3537 error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL,
3538 wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy);
3539 if (error) {
3540 if (wait) {
3541 log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error);
3542 }
3543 nfs_sndunlock(req);
3544 lck_mtx_lock(&req->r_mtx);
3545 req->r_flags &= ~R_SENDING;
3546 req->r_flags |= R_MUSTRESEND;
3547 req->r_rtt = 0;
3548 lck_mtx_unlock(&req->r_mtx);
3549 return 0;
3550 }
3551
3552 bzero(&msg, sizeof(msg));
3553 if ((sotype != SOCK_STREAM) && !sock_isconnected(nso->nso_so) && ((sendnam = nmp->nm_saddr))) {
3554 msg.msg_name = (caddr_t)sendnam;
3555 msg.msg_namelen = sendnam->sa_len;
3556 }
3557 NFS_SOCK_DUMP_MBUF("Sending mbuf\n", mreqcopy);
3558 error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen);
3559 if (error || (sentlen != req->r_mreqlen)) {
3560 NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n",
3561 req->r_xid, (int)sentlen, (int)req->r_mreqlen, error);
3562 }
3563
3564 if (!error && (sentlen != req->r_mreqlen)) {
3565 error = EWOULDBLOCK;
3566 }
3567 needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen));
3568
3569 lck_mtx_lock(&req->r_mtx);
3570 req->r_flags &= ~R_SENDING;
3571 req->r_rtt = 0;
3572 if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) {
3573 req->r_rexmit = NFS_MAXREXMIT;
3574 }
3575
3576 if (!error) {
3577 /* SUCCESS */
3578 req->r_flags &= ~R_RESENDERR;
3579 if (rexmit) {
3580 OSAddAtomic64(1, &nfsstats.rpcretries);
3581 }
3582 req->r_flags |= R_SENT;
3583 if (req->r_flags & R_WAITSENT) {
3584 req->r_flags &= ~R_WAITSENT;
3585 wakeup(req);
3586 }
3587 nfs_sndunlock(req);
3588 lck_mtx_unlock(&req->r_mtx);
3589 return 0;
3590 }
3591
3592 /* send failed */
3593 req->r_flags |= R_MUSTRESEND;
3594 if (rexmit) {
3595 req->r_flags |= R_RESENDERR;
3596 }
3597 if ((error == EINTR) || (error == ERESTART)) {
3598 req->r_error = error;
3599 }
3600 lck_mtx_unlock(&req->r_mtx);
3601
3602 if (sotype == SOCK_DGRAM) {
3603 /*
3604 * Note: even though a first send may fail, we consider
3605 * the request sent for congestion window purposes.
3606 * So we don't need to undo any of the changes made above.
3607 */
3608 /*
3609 * Should socket errors be ignored for connectionless sockets?
3610 * For now, ignore them all.
3611 */
3612 if ((error != EINTR) && (error != ERESTART) &&
3613 (error != EWOULDBLOCK) && (error != EIO) && (nso == nmp->nm_nso)) {
3614 int clearerror = 0, optlen = sizeof(clearerror);
3615 sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen);
3616 #ifdef NFS_SOCKET_DEBUGGING
3617 if (clearerror) {
3618 NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n",
3619 error, clearerror);
3620 }
3621 #endif
3622 }
3623 }
3624
3625 /* check if it appears we should reconnect the socket */
3626 switch (error) {
3627 case EWOULDBLOCK:
3628 /* if send timed out, reconnect if on TCP */
3629 if (sotype != SOCK_STREAM) {
3630 break;
3631 }
3632 OS_FALLTHROUGH;
3633 case EPIPE:
3634 case EADDRNOTAVAIL:
3635 case ENETDOWN:
3636 case ENETUNREACH:
3637 case ENETRESET:
3638 case ECONNABORTED:
3639 case ECONNRESET:
3640 case ENOTCONN:
3641 case ESHUTDOWN:
3642 case ECONNREFUSED:
3643 case EHOSTDOWN:
3644 case EHOSTUNREACH:
3645 /* case ECANCELED??? */
3646 needrecon = 1;
3647 break;
3648 }
3649 if (needrecon && (nso == nmp->nm_nso)) { /* mark socket as needing reconnect */
3650 NFS_SOCK_DBG("nfs_send: 0x%llx need reconnect %d\n", req->r_xid, error);
3651 nfs_need_reconnect(nmp);
3652 }
3653
3654 nfs_sndunlock(req);
3655
3656 if (nfs_is_dead(error, nmp)) {
3657 error = EIO;
3658 }
3659
3660 /*
3661 * Don't log some errors:
3662 * EPIPE errors may be common with servers that drop idle connections.
3663 * EADDRNOTAVAIL may occur on network transitions.
3664 * ENOTCONN may occur under some network conditions.
3665 */
3666 if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) {
3667 error = 0;
3668 }
3669 if (error && (error != EINTR) && (error != ERESTART)) {
3670 log(LOG_INFO, "nfs send error %d for server %s\n", error,
3671 !req->r_nmp ? "<unmounted>" :
3672 vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname);
3673 }
3674
3675 /* prefer request termination error over other errors */
3676 error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0);
3677 if (error2) {
3678 error = error2;
3679 }
3680
3681 /* only allow the following errors to be returned */
3682 if ((error != EINTR) && (error != ERESTART) && (error != EIO) &&
3683 (error != ENXIO) && (error != ETIMEDOUT)) {
3684 /*
3685 * We got some error we don't know what to do with,
3686 * i.e., we're not reconnecting, we map it to
3687 * EIO. Presumably our send failed and we better tell
3688 * the caller so they don't wait for a reply that is
3689 * never going to come. If we are reconnecting we
3690 * return 0 and the request will be resent.
3691 */
3692 error = needrecon ? 0 : EIO;
3693 }
3694 return error;
3695 }
3696
3697 /*
3698 * NFS client socket upcalls
3699 *
3700 * Pull RPC replies out of an NFS mount's socket and match them
3701 * up with the pending request.
3702 *
3703 * The datagram code is simple because we always get whole
3704 * messages out of the socket.
3705 *
3706 * The stream code is more involved because we have to parse
3707 * the RPC records out of the stream.
3708 */
3709
3710 /* NFS client UDP socket upcall */
3711 void
3712 nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag)
3713 {
3714 struct nfsmount *nmp = arg;
3715 struct nfs_socket *nso = nmp->nm_nso;
3716 size_t rcvlen;
3717 mbuf_t m;
3718 int error = 0;
3719
3720 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3721 return;
3722 }
3723
3724 do {
3725 /* make sure we're on the current socket */
3726 if (!nso || (nso->nso_so != so)) {
3727 return;
3728 }
3729
3730 m = NULL;
3731 rcvlen = 1000000;
3732 error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen);
3733 if (m) {
3734 nfs_request_match_reply(nmp, m);
3735 }
3736 } while (m && !error);
3737
3738 if (error && (error != EWOULDBLOCK)) {
3739 /* problems with the socket... mark for reconnection */
3740 NFS_SOCK_DBG("nfs_udp_rcv: need reconnect %d\n", error);
3741 nfs_need_reconnect(nmp);
3742 }
3743 }
3744
3745 /* NFS client TCP socket upcall */
3746 void
3747 nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag)
3748 {
3749 struct nfsmount *nmp = arg;
3750 struct nfs_socket *nso = nmp->nm_nso;
3751 struct nfs_rpc_record_state nrrs;
3752 mbuf_t m;
3753 int error = 0;
3754 int recv = 1;
3755 int wup = 0;
3756
3757 if (nmp->nm_sockflags & NMSOCK_CONNECTING) {
3758 return;
3759 }
3760
3761 /* make sure we're on the current socket */
3762 lck_mtx_lock(&nmp->nm_lock);
3763 nso = nmp->nm_nso;
3764 if (!nso || (nso->nso_so != so) || (nmp->nm_sockflags & (NMSOCK_DISCONNECTING))) {
3765 lck_mtx_unlock(&nmp->nm_lock);
3766 return;
3767 }
3768 lck_mtx_unlock(&nmp->nm_lock);
3769
3770 /* make sure this upcall should be trying to do work */
3771 lck_mtx_lock(&nso->nso_lock);
3772 if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) {
3773 lck_mtx_unlock(&nso->nso_lock);
3774 return;
3775 }
3776 nso->nso_flags |= NSO_UPCALL;
3777 nrrs = nso->nso_rrs;
3778 lck_mtx_unlock(&nso->nso_lock);
3779
3780 /* loop while we make error-free progress */
3781 while (!error && recv) {
3782 error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m);
3783 if (m) { /* match completed response with request */
3784 nfs_request_match_reply(nmp, m);
3785 }
3786 }
3787
3788 /* Update the socket's RPC parsing state */
3789 lck_mtx_lock(&nso->nso_lock);
3790 nso->nso_rrs = nrrs;
3791 if (nso->nso_flags & NSO_DISCONNECTING) {
3792 wup = 1;
3793 }
3794 nso->nso_flags &= ~NSO_UPCALL;
3795 lck_mtx_unlock(&nso->nso_lock);
3796 if (wup) {
3797 wakeup(&nso->nso_flags);
3798 }
3799
3800 #ifdef NFS_SOCKET_DEBUGGING
3801 if (!recv && (error != EWOULDBLOCK)) {
3802 NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error);
3803 }
3804 #endif
3805 /* note: no error and no data indicates server closed its end */
3806 if ((error != EWOULDBLOCK) && (error || !recv)) {
3807 /* problems with the socket... mark for reconnection */
3808 NFS_SOCK_DBG("nfs_tcp_rcv: need reconnect %d\n", error);
3809 nfs_need_reconnect(nmp);
3810 }
3811 }
3812
3813 /*
3814 * "poke" a socket to try to provoke any pending errors
3815 */
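/*
 * A zero-length sock_send() with MSG_DONTWAIT transmits nothing, but it
 * returns any error already pending on the socket, which nfs_is_dead()
 * below then gets a chance to act on.
 */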
3816 void
3817 nfs_sock_poke(struct nfsmount *nmp)
3818 {
3819 struct iovec aio;
3820 struct msghdr msg;
3821 size_t len;
3822 int error = 0;
3823 int dummy;
3824
3825 lck_mtx_lock(&nmp->nm_lock);
3826 if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) ||
3827 !(nmp->nm_sockflags & NMSOCK_READY) || !nmp->nm_nso || !nmp->nm_nso->nso_so) {
3828 /* Nothing to poke */
3829 nmp->nm_sockflags &= ~NMSOCK_POKE;
3830 wakeup(&nmp->nm_sockflags);
3831 lck_mtx_unlock(&nmp->nm_lock);
3832 return;
3833 }
3834 lck_mtx_unlock(&nmp->nm_lock);
3835 aio.iov_base = &dummy;
3836 aio.iov_len = 0;
3837 len = 0;
3838 bzero(&msg, sizeof(msg));
3839 msg.msg_iov = &aio;
3840 msg.msg_iovlen = 1;
3841 error = sock_send(nmp->nm_nso->nso_so, &msg, MSG_DONTWAIT, &len);
3842 NFS_SOCK_DBG("nfs_sock_poke: error %d\n", error);
3843 lck_mtx_lock(&nmp->nm_lock);
3844 nmp->nm_sockflags &= ~NMSOCK_POKE;
3845 wakeup(&nmp->nm_sockflags);
3846 lck_mtx_unlock(&nmp->nm_lock);
3847 nfs_is_dead(error, nmp);
3848 }
3849
3850 /*
3851 * Match an RPC reply with the corresponding request
3852 */
3853 void
3854 nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep)
3855 {
3856 struct nfsreq *req;
3857 struct nfsm_chain nmrep;
3858 u_int32_t reply = 0, rxid = 0;
3859 int error = 0, asyncioq, t1;
3860
3861 bzero(&nmrep, sizeof(nmrep));
3862 /* Get the xid and check that it is an rpc reply */
3863 nfsm_chain_dissect_init(error, &nmrep, mrep);
3864 nfsm_chain_get_32(error, &nmrep, rxid);
3865 nfsm_chain_get_32(error, &nmrep, reply);
3866 if (error || (reply != RPC_REPLY)) {
3867 OSAddAtomic64(1, &nfsstats.rpcinvalid);
3868 mbuf_freem(mrep);
3869 return;
3870 }
3871
3872 /*
3873 * Loop through the request list to match up the reply
3874 * If no match, just drop it.
3875 */
3876 lck_mtx_lock(&nfs_request_mutex);
3877 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
3878 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3879 continue;
3880 }
3881 /* looks like we have it, grab lock and double check */
3882 lck_mtx_lock(&req->r_mtx);
3883 if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) {
3884 lck_mtx_unlock(&req->r_mtx);
3885 continue;
3886 }
3887 /* Found it.. */
3888 req->r_nmrep = nmrep;
3889 lck_mtx_lock(&nmp->nm_lock);
3890 if (nmp->nm_sotype == SOCK_DGRAM) {
3891 /*
3892 * Update congestion window.
3893 * Do the additive increase of one rpc/rtt.
3894 */
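/*
 * With nm_cwnd kept in units of NFS_CWNDSCALE, the increment below is
 * roughly NFS_CWNDSCALE^2 / nm_cwnd per reply (the nm_cwnd/2 term just
 * rounds), which sums to about one NFS_CWNDSCALE -- one RPC -- per
 * window of replies, mirroring TCP's additive increase.
 */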
3895 FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
3896 if (nmp->nm_cwnd <= nmp->nm_sent) {
3897 nmp->nm_cwnd +=
3898 ((NFS_CWNDSCALE * NFS_CWNDSCALE) +
3899 (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
3900 if (nmp->nm_cwnd > NFS_MAXCWND) {
3901 nmp->nm_cwnd = NFS_MAXCWND;
3902 }
3903 }
3904 if (req->r_flags & R_CWND) {
3905 nmp->nm_sent -= NFS_CWNDSCALE;
3906 req->r_flags &= ~R_CWND;
3907 }
3908 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
3909 /* congestion window is open, poke the cwnd queue */
3910 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
3911 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
3912 req2->r_cchain.tqe_next = NFSREQNOLIST;
3913 wakeup(req2);
3914 }
3915 }
3916 /*
3917 * Update rtt using a gain of 0.125 on the mean
3918 * and a gain of 0.25 on the deviation.
3919 */
3920 if (req->r_flags & R_TIMING) {
3921 /*
3922 * Since the timer resolution of
3923 * NFS_HZ is so coarse, it can often
3924 * result in r_rtt == 0. Since
3925 * r_rtt == N means that the actual
3926 * rtt is between N+dt and N+2-dt ticks,
3927 * add 1.
3928 */
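/*
 * This is the usual fixed-point Jacobson/Karels filter: NFS_SRTT is
 * stored scaled by 8 and NFS_SDRTT by 4, so "SRTT += rtt - SRTT/8"
 * applies a gain of 1/8 to the mean, and the deviation update below
 * a gain of 1/4, all in integer arithmetic.
 */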
3929 if (proct[req->r_procnum] == 0) {
3930 panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum);
3931 }
3932 t1 = req->r_rtt + 1;
3933 t1 -= (NFS_SRTT(req) >> 3);
3934 NFS_SRTT(req) += t1;
3935 if (t1 < 0) {
3936 t1 = -t1;
3937 }
3938 t1 -= (NFS_SDRTT(req) >> 2);
3939 NFS_SDRTT(req) += t1;
3940 }
3941 nmp->nm_timeouts = 0;
3942 lck_mtx_unlock(&nmp->nm_lock);
3943 /* signal anyone waiting on this request */
3944 wakeup(req);
3945 asyncioq = (req->r_callback.rcb_func != NULL);
3946 #if CONFIG_NFS_GSS
3947 if (nfs_request_using_gss(req)) {
3948 nfs_gss_clnt_rpcdone(req);
3949 }
3950 #endif /* CONFIG_NFS_GSS */
3951 lck_mtx_unlock(&req->r_mtx);
3952 lck_mtx_unlock(&nfs_request_mutex);
3953 /* if it's an async RPC with a callback, queue it up */
3954 if (asyncioq) {
3955 nfs_asyncio_finish(req);
3956 }
3957 break;
3958 }
3959
3960 if (!req) {
3961 /* not matched to a request, so drop it. */
3962 lck_mtx_unlock(&nfs_request_mutex);
3963 OSAddAtomic64(1, &nfsstats.rpcunexpected);
3964 mbuf_freem(mrep);
3965 }
3966 }
3967
3968 /*
3969 * Wait for the reply for a given request...
3970 * ...potentially resending the request if necessary.
3971 */
3972 int
3973 nfs_wait_reply(struct nfsreq *req)
3974 {
3975 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
3976 int error = 0, slpflag, first = 1;
3977
3978 if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
3979 slpflag = PCATCH;
3980 } else {
3981 slpflag = 0;
3982 }
3983
3984 lck_mtx_lock(&req->r_mtx);
3985 while (!req->r_nmrep.nmc_mhead) {
3986 if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) {
3987 break;
3988 }
3989 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
3990 break;
3991 }
3992 /* check if we need to resend */
3993 if (req->r_flags & R_MUSTRESEND) {
3994 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n",
3995 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
3996 req->r_flags |= R_SENDING;
3997 lck_mtx_unlock(&req->r_mtx);
3998 if (nfs_request_using_gss(req)) {
3999 /*
4000 * It's an RPCSEC_GSS request.
4001 * Can't just resend the original request
4002 * without bumping the cred sequence number.
4003 * Go back and re-build the request.
4004 */
4005 lck_mtx_lock(&req->r_mtx);
4006 req->r_flags &= ~R_SENDING;
4007 lck_mtx_unlock(&req->r_mtx);
4008 return EAGAIN;
4009 }
4010 error = nfs_send(req, 1);
4011 lck_mtx_lock(&req->r_mtx);
4012 NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n",
4013 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error);
4014 if (error) {
4015 break;
4016 }
4017 if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) {
4018 break;
4019 }
4020 }
4021 /* need to poll if we're P_NOREMOTEHANG */
4022 if (nfs_noremotehang(req->r_thread)) {
4023 ts.tv_sec = 1;
4024 }
4025 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts);
4026 first = slpflag = 0;
4027 }
4028 lck_mtx_unlock(&req->r_mtx);
4029
4030 return error;
4031 }
4032
4033 /*
4034 * An NFS request goes something like this:
4035 * (nb: always frees up mreq mbuf list)
4036 * nfs_request_create()
4037 * - allocates a request struct if one is not provided
4038 * - initial fill-in of the request struct
4039 * nfs_request_add_header()
4040 * - add the RPC header
4041 * nfs_request_send()
4042 * - link it into list
4043 * - call nfs_send() for first transmit
4044 * nfs_request_wait()
4045 * - call nfs_wait_reply() to wait for the reply
4046 * nfs_request_finish()
4047 * - break down rpc header and return with error or nfs reply
4048 * pointed to by nmrep.
4049 * nfs_request_rele()
4050 * nfs_request_destroy()
4051 * - clean up the request struct
4052 * - free the request struct if it was allocated by nfs_request_create()
4053 */
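/*
 * A minimal sketch of the synchronous path (nfs_request2() below is
 * the canonical version, including the R_RESTART retry loop):
 *
 *	nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req);
 *	nfs_request_add_header(req);
 *	nfs_request_send(req, 1);
 *	nfs_request_wait(req);
 *	error = nfs_request_finish(req, nmrepp, status);
 *	nfs_request_rele(req);
 */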
4054
4055 /*
4056 * Set up an NFS request struct (allocating if no request passed in).
4057 */
4058 int
4059 nfs_request_create(
4060 nfsnode_t np,
4061 mount_t mp, /* used only if !np */
4062 struct nfsm_chain *nmrest,
4063 int procnum,
4064 thread_t thd,
4065 kauth_cred_t cred,
4066 struct nfsreq **reqp)
4067 {
4068 struct nfsreq *req, *newreq = NULL;
4069 struct nfsmount *nmp;
4070
4071 req = *reqp;
4072 if (!req) {
4073 /* allocate a new NFS request structure */
4074 req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
4075 } else {
4076 bzero(req, sizeof(*req));
4077 }
4078 if (req == newreq) {
4079 req->r_flags = R_ALLOCATED;
4080 }
4081
4082 nmp = VFSTONFS(np ? NFSTOMP(np) : mp);
4083 if (nfs_mount_gone(nmp)) {
4084 if (newreq) {
4085 NFS_ZFREE(nfs_req_zone, newreq);
4086 }
4087 return ENXIO;
4088 }
4089 lck_mtx_lock(&nmp->nm_lock);
4090 if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) &&
4091 (nmp->nm_state & NFSSTA_TIMEO)) {
4092 lck_mtx_unlock(&nmp->nm_lock);
4093 mbuf_freem(nmrest->nmc_mhead);
4094 nmrest->nmc_mhead = NULL;
4095 if (newreq) {
4096 NFS_ZFREE(nfs_req_zone, newreq);
4097 }
4098 return ENXIO;
4099 }
4100
4101 if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) {
4102 OSAddAtomic64(1, &nfsstats.rpccnt[procnum]);
4103 }
4104 if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) {
4105 panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum);
4106 }
4107
4108 lck_mtx_init(&req->r_mtx, &nfs_request_grp, LCK_ATTR_NULL);
4109 req->r_nmp = nmp;
4110 nmp->nm_ref++;
4111 req->r_np = np;
4112 req->r_thread = thd;
4113 if (!thd) {
4114 req->r_flags |= R_NOINTR;
4115 }
4116 if (IS_VALID_CRED(cred)) {
4117 kauth_cred_ref(cred);
4118 req->r_cred = cred;
4119 }
4120 req->r_procnum = procnum;
4121 if (proct[procnum] > 0) {
4122 req->r_flags |= R_TIMING;
4123 }
4124 req->r_nmrep.nmc_mhead = NULL;
4125 SLIST_INIT(&req->r_gss_seqlist);
4126 req->r_achain.tqe_next = NFSREQNOLIST;
4127 req->r_rchain.tqe_next = NFSREQNOLIST;
4128 req->r_cchain.tqe_next = NFSREQNOLIST;
4129
4130 /* set auth flavor to use for request */
4131 if (!req->r_cred) {
4132 req->r_auth = RPCAUTH_NONE;
4133 } else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) {
4134 req->r_auth = req->r_np->n_auth;
4135 } else {
4136 req->r_auth = nmp->nm_auth;
4137 }
4138
4139 lck_mtx_unlock(&nmp->nm_lock);
4140
4141 /* move the request mbuf chain to the nfsreq */
4142 req->r_mrest = nmrest->nmc_mhead;
4143 nmrest->nmc_mhead = NULL;
4144
4145 req->r_flags |= R_INITTED;
4146 req->r_refs = 1;
4147 if (newreq) {
4148 *reqp = req;
4149 }
4150 return 0;
4151 }
4152
4153 /*
4154 * Clean up and free an NFS request structure.
4155 */
4156 void
4157 nfs_request_destroy(struct nfsreq *req)
4158 {
4159 struct nfsmount *nmp;
4160 int clearjbtimeo = 0;
4161
4162 #if CONFIG_NFS_GSS
4163 struct gss_seq *gsp, *ngsp;
4164 #endif
4165
4166 if (!req || !(req->r_flags & R_INITTED)) {
4167 return;
4168 }
4169 nmp = req->r_nmp;
4170 req->r_flags &= ~R_INITTED;
4171 if (req->r_lflags & RL_QUEUED) {
4172 nfs_reqdequeue(req);
4173 }
4174
4175 if (req->r_achain.tqe_next != NFSREQNOLIST) {
4176 /*
4177 * Still on an async I/O queue?
4178 * %%% But which one, we may be on a local iod.
4179 */
4180 lck_mtx_lock(&nfsiod_mutex);
4181 if (nmp && req->r_achain.tqe_next != NFSREQNOLIST) {
4182 TAILQ_REMOVE(&nmp->nm_iodq, req, r_achain);
4183 req->r_achain.tqe_next = NFSREQNOLIST;
4184 }
4185 lck_mtx_unlock(&nfsiod_mutex);
4186 }
4187
4188 lck_mtx_lock(&req->r_mtx);
4189 if (nmp) {
4190 lck_mtx_lock(&nmp->nm_lock);
4191 if (req->r_flags & R_CWND) {
4192 /* Decrement the outstanding request count. */
4193 req->r_flags &= ~R_CWND;
4194 nmp->nm_sent -= NFS_CWNDSCALE;
4195 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4196 /* congestion window is open, poke the cwnd queue */
4197 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4198 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4199 req2->r_cchain.tqe_next = NFSREQNOLIST;
4200 wakeup(req2);
4201 }
4202 }
4203 /* XXX should we just remove this conditional, we should have a reference if we're resending */
4204 if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
4205 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4206 req->r_flags &= ~R_RESENDQ;
4207 req->r_rchain.tqe_next = NFSREQNOLIST;
4208 }
4209 if (req->r_cchain.tqe_next != NFSREQNOLIST) {
4210 TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
4211 req->r_cchain.tqe_next = NFSREQNOLIST;
4212 }
4213 if (req->r_flags & R_JBTPRINTFMSG) {
4214 req->r_flags &= ~R_JBTPRINTFMSG;
4215 nmp->nm_jbreqs--;
4216 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4217 }
4218 lck_mtx_unlock(&nmp->nm_lock);
4219 }
4220 lck_mtx_unlock(&req->r_mtx);
4221
4222 if (clearjbtimeo) {
4223 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4224 }
4225 if (req->r_mhead) {
4226 mbuf_freem(req->r_mhead);
4227 } else if (req->r_mrest) {
4228 mbuf_freem(req->r_mrest);
4229 }
4230 if (req->r_nmrep.nmc_mhead) {
4231 mbuf_freem(req->r_nmrep.nmc_mhead);
4232 }
4233 if (IS_VALID_CRED(req->r_cred)) {
4234 kauth_cred_unref(&req->r_cred);
4235 }
4236 #if CONFIG_NFS_GSS
4237 if (nfs_request_using_gss(req)) {
4238 nfs_gss_clnt_rpcdone(req);
4239 }
4240 SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp)
4241 FREE(gsp, M_TEMP);
4242 if (req->r_gss_ctx) {
4243 nfs_gss_clnt_ctx_unref(req);
4244 }
4245 #endif /* CONFIG_NFS_GSS */
4246 if (req->r_wrongsec) {
4247 FREE(req->r_wrongsec, M_TEMP);
4248 }
4249 if (nmp) {
4250 nfs_mount_rele(nmp);
4251 }
4252 lck_mtx_destroy(&req->r_mtx, &nfs_request_grp);
4253 if (req->r_flags & R_ALLOCATED) {
4254 NFS_ZFREE(nfs_req_zone, req);
4255 }
4256 }
4257
4258 void
4259 nfs_request_ref(struct nfsreq *req, int locked)
4260 {
4261 if (!locked) {
4262 lck_mtx_lock(&req->r_mtx);
4263 }
4264 if (req->r_refs <= 0) {
4265 panic("nfsreq reference error");
4266 }
4267 req->r_refs++;
4268 if (!locked) {
4269 lck_mtx_unlock(&req->r_mtx);
4270 }
4271 }
4272
4273 void
4274 nfs_request_rele(struct nfsreq *req)
4275 {
4276 int destroy;
4277
4278 lck_mtx_lock(&req->r_mtx);
4279 if (req->r_refs <= 0) {
4280 panic("nfsreq reference underflow");
4281 }
4282 req->r_refs--;
4283 destroy = (req->r_refs == 0);
4284 lck_mtx_unlock(&req->r_mtx);
4285 if (destroy) {
4286 nfs_request_destroy(req);
4287 }
4288 }
4289
4290
4291 /*
4292 * Add an (updated) RPC header with authorization to an NFS request.
4293 */
4294 int
4295 nfs_request_add_header(struct nfsreq *req)
4296 {
4297 struct nfsmount *nmp;
4298 int error = 0;
4299 mbuf_t m;
4300
4301 /* free up any previous header */
4302 if ((m = req->r_mhead)) {
4303 while (m && (m != req->r_mrest)) {
4304 m = mbuf_free(m);
4305 }
4306 req->r_mhead = NULL;
4307 }
4308
4309 nmp = req->r_nmp;
4310 if (nfs_mount_gone(nmp)) {
4311 return ENXIO;
4312 }
4313
4314 error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead);
4315 if (error) {
4316 return error;
4317 }
4318
4319 req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead);
4320 nmp = req->r_nmp;
4321 if (nfs_mount_gone(nmp)) {
4322 return ENXIO;
4323 }
4324 lck_mtx_lock(&nmp->nm_lock);
4325 if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) {
4326 req->r_retry = nmp->nm_retry;
4327 } else {
4328 req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */
4329 }
4330 lck_mtx_unlock(&nmp->nm_lock);
4331
4332 return error;
4333 }
4334
4335
4336 /*
4337 * Queue an NFS request up and send it out.
4338 */
4339 int
4340 nfs_request_send(struct nfsreq *req, int wait)
4341 {
4342 struct nfsmount *nmp;
4343 struct timeval now;
4344
4345 lck_mtx_lock(&req->r_mtx);
4346 req->r_flags |= R_SENDING;
4347 lck_mtx_unlock(&req->r_mtx);
4348
4349 lck_mtx_lock(&nfs_request_mutex);
4350
4351 nmp = req->r_nmp;
4352 if (nfs_mount_gone(nmp)) {
4353 lck_mtx_unlock(&nfs_request_mutex);
4354 return ENXIO;
4355 }
4356
4357 microuptime(&now);
4358 if (!req->r_start) {
4359 req->r_start = now.tv_sec;
4360 req->r_lastmsg = now.tv_sec -
4361 ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
4362 }
4363
4364 OSAddAtomic64(1, &nfsstats.rpcrequests);
4365
4366 /*
4367 * Make sure the request is not in the queue.
4368 */
4369 if (req->r_lflags & RL_QUEUED) {
4370 #if DEVELOPMENT
4371 panic("nfs_request_send: req %p is already in global requests queue", req);
4372 #else
4373 TAILQ_REMOVE(&nfs_reqq, req, r_chain);
4374 req->r_lflags &= ~RL_QUEUED;
4375 #endif /* DEVELOPMENT */
4376 }
4377
4378 /*
4379 * Chain request into list of outstanding requests. Be sure
4380 * to put it LAST so timer finds oldest requests first.
4381 * Make sure that the request queue timer is running
4382 * to check for possible request timeout.
4383 */
4384 TAILQ_INSERT_TAIL(&nfs_reqq, req, r_chain);
4385 req->r_lflags |= RL_QUEUED;
4386 if (!nfs_request_timer_on) {
4387 nfs_request_timer_on = 1;
4388 nfs_interval_timer_start(nfs_request_timer_call,
4389 NFS_REQUESTDELAY);
4390 }
4391 lck_mtx_unlock(&nfs_request_mutex);
4392
4393 /* Send the request... */
4394 return nfs_send(req, wait);
4395 }
4396
4397 /*
4398 * Call nfs_wait_reply() to wait for the reply.
4399 */
4400 void
4401 nfs_request_wait(struct nfsreq *req)
4402 {
4403 req->r_error = nfs_wait_reply(req);
4404 }
4405
4406 /*
4407 * Finish up an NFS request by dequeueing it and
4408 * doing the initial NFS request reply processing.
4409 */
4410 int
4411 nfs_request_finish(
4412 struct nfsreq *req,
4413 struct nfsm_chain *nmrepp,
4414 int *status)
4415 {
4416 struct nfsmount *nmp;
4417 mbuf_t mrep;
4418 int verf_type = 0;
4419 uint32_t verf_len = 0;
4420 uint32_t reply_status = 0;
4421 uint32_t rejected_status = 0;
4422 uint32_t auth_status = 0;
4423 uint32_t accepted_status = 0;
4424 struct nfsm_chain nmrep;
4425 int error, clearjbtimeo;
4426
4427 error = req->r_error;
4428
4429 if (nmrepp) {
4430 nmrepp->nmc_mhead = NULL;
4431 }
4432
4433 /* RPC done, unlink the request. */
4434 nfs_reqdequeue(req);
4435
4436 mrep = req->r_nmrep.nmc_mhead;
4437
4438 nmp = req->r_nmp;
4439
4440 if ((req->r_flags & R_CWND) && nmp) {
4441 /*
4442 * Decrement the outstanding request count.
4443 */
4444 req->r_flags &= ~R_CWND;
4445 lck_mtx_lock(&nmp->nm_lock);
4446 FSDBG(273, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
4447 nmp->nm_sent -= NFS_CWNDSCALE;
4448 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
4449 /* congestion window is open, poke the cwnd queue */
4450 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
4451 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
4452 req2->r_cchain.tqe_next = NFSREQNOLIST;
4453 wakeup(req2);
4454 }
4455 lck_mtx_unlock(&nmp->nm_lock);
4456 }
4457
4458 #if CONFIG_NFS_GSS
4459 if (nfs_request_using_gss(req)) {
4460 /*
4461 * If the request used an RPCSEC_GSS credential
4462 * then reset its sequence number bit in the
4463 * request window.
4464 */
4465 nfs_gss_clnt_rpcdone(req);
4466
4467 /*
4468 * If we need to re-send, go back and re-build the
4469 * request based on a new sequence number.
4470 * Note that we're using the original XID.
4471 */
4472 if (error == EAGAIN) {
4473 req->r_error = 0;
4474 if (mrep) {
4475 mbuf_freem(mrep);
4476 }
4477 error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs
4478 req->r_nmrep.nmc_mhead = NULL;
4479 req->r_flags |= R_RESTART;
4480 if (error == ENEEDAUTH) {
4481 req->r_xid = 0; // get a new XID
4482 error = 0;
4483 }
4484 goto nfsmout;
4485 }
4486 }
4487 #endif /* CONFIG_NFS_GSS */
4488
4489 /*
4490 * If there was a successful reply, make sure to mark the mount as up.
4491 * If a tprintf message was given (or if this is a timed-out soft mount)
4492 * then post a tprintf message indicating the server is alive again.
4493 */
4494 if (!error) {
4495 if ((req->r_flags & R_TPRINTFMSG) ||
4496 (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) &&
4497 ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) {
4498 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again");
4499 } else {
4500 nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL);
4501 }
4502 }
4503 if (!error && !nmp) {
4504 error = ENXIO;
4505 }
4506 nfsmout_if(error);
4507
4508 /*
4509 * break down the RPC header and check if ok
4510 */
4511 nmrep = req->r_nmrep;
4512 nfsm_chain_get_32(error, &nmrep, reply_status);
4513 nfsmout_if(error);
4514 if (reply_status == RPC_MSGDENIED) {
4515 nfsm_chain_get_32(error, &nmrep, rejected_status);
4516 nfsmout_if(error);
4517 if (rejected_status == RPC_MISMATCH) {
4518 error = ENOTSUP;
4519 goto nfsmout;
4520 }
4521 nfsm_chain_get_32(error, &nmrep, auth_status);
4522 nfsmout_if(error);
4523 switch (auth_status) {
4524 #if CONFIG_NFS_GSS
4525 case RPCSEC_GSS_CREDPROBLEM:
4526 case RPCSEC_GSS_CTXPROBLEM:
4527 /*
4528 * An RPCSEC_GSS cred or context problem.
4529 * We can't use it anymore.
4530 * Restore the args, renew the context
4531 * and set up for a resend.
4532 */
4533 error = nfs_gss_clnt_args_restore(req);
4534 if (error && error != ENEEDAUTH) {
4535 break;
4536 }
4537
4538 if (!error) {
4539 error = nfs_gss_clnt_ctx_renew(req);
4540 if (error) {
4541 break;
4542 }
4543 }
4544 mbuf_freem(mrep);
4545 req->r_nmrep.nmc_mhead = NULL;
4546 req->r_xid = 0; // get a new XID
4547 req->r_flags |= R_RESTART;
4548 goto nfsmout;
4549 #endif /* CONFIG_NFS_GSS */
4550 default:
4551 error = EACCES;
4552 break;
4553 }
4554 goto nfsmout;
4555 }
4556
4557 /* Now check the verifier */
4558 nfsm_chain_get_32(error, &nmrep, verf_type); // verifier flavor
4559 nfsm_chain_get_32(error, &nmrep, verf_len); // verifier length
4560 nfsmout_if(error);
4561
4562 switch (req->r_auth) {
4563 case RPCAUTH_NONE:
4564 case RPCAUTH_SYS:
4565 /* Any AUTH_SYS verifier is ignored */
4566 if (verf_len > 0) {
4567 nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len));
4568 }
4569 nfsm_chain_get_32(error, &nmrep, accepted_status);
4570 break;
4571 #if CONFIG_NFS_GSS
4572 case RPCAUTH_KRB5:
4573 case RPCAUTH_KRB5I:
4574 case RPCAUTH_KRB5P:
4575 error = nfs_gss_clnt_verf_get(req, &nmrep,
4576 verf_type, verf_len, &accepted_status);
4577 break;
4578 #endif /* CONFIG_NFS_GSS */
4579 }
4580 nfsmout_if(error);
4581
4582 switch (accepted_status) {
4583 case RPC_SUCCESS:
4584 if (req->r_procnum == NFSPROC_NULL) {
4585 /*
4586 * The NFS null procedure is unique
4587 * in not returning an NFS status.
4588 */
4589 *status = NFS_OK;
4590 } else {
4591 nfsm_chain_get_32(error, &nmrep, *status);
4592 nfsmout_if(error);
4593 }
4594
4595 if ((nmp->nm_vers != NFS_VER2) && (*status == NFSERR_TRYLATER)) {
4596 /*
4597 * It's a JUKEBOX error - delay and try again
4598 */
4599 int delay, slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
4600
4601 mbuf_freem(mrep);
4602 req->r_nmrep.nmc_mhead = NULL;
4603 if ((req->r_delay >= 30) && !(nmp->nm_state & NFSSTA_MOUNTED)) {
4604 /* we're not yet completely mounted and */
4605 /* we can't complete an RPC, so we fail */
4606 OSAddAtomic64(1, &nfsstats.rpctimeouts);
4607 nfs_softterm(req);
4608 error = req->r_error;
4609 goto nfsmout;
4610 }
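/* exponential backoff: start at NFS_TRYLATERDEL, double each retry, cap at 30 seconds */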
4611 req->r_delay = !req->r_delay ? NFS_TRYLATERDEL : (req->r_delay * 2);
4612 if (req->r_delay > 30) {
4613 req->r_delay = 30;
4614 }
4615 if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) {
4616 if (!(req->r_flags & R_JBTPRINTFMSG)) {
4617 req->r_flags |= R_JBTPRINTFMSG;
4618 lck_mtx_lock(&nmp->nm_lock);
4619 nmp->nm_jbreqs++;
4620 lck_mtx_unlock(&nmp->nm_lock);
4621 }
4622 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO,
4623 "resource temporarily unavailable (jukebox)", 0);
4624 }
4625 if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) &&
4626 !(req->r_flags & R_NOINTR)) {
4627 /* for soft mounts, just give up after a short while */
4628 OSAddAtomic64(1, &nfsstats.rpctimeouts);
4629 nfs_softterm(req);
4630 error = req->r_error;
4631 goto nfsmout;
4632 }
4633 delay = req->r_delay;
4634 if (req->r_callback.rcb_func) {
4635 struct timeval now;
4636 microuptime(&now);
4637 req->r_resendtime = now.tv_sec + delay;
4638 } else {
4639 do {
4640 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
4641 goto nfsmout;
4642 }
4643 tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz);
4644 slpflag = 0;
4645 } while (--delay > 0);
4646 }
4647 req->r_xid = 0; // get a new XID
4648 req->r_flags |= R_RESTART;
4649 req->r_start = 0;
4650 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER);
4651 return 0;
4652 }
4653
4654 if (req->r_flags & R_JBTPRINTFMSG) {
4655 req->r_flags &= ~R_JBTPRINTFMSG;
4656 lck_mtx_lock(&nmp->nm_lock);
4657 nmp->nm_jbreqs--;
4658 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4659 lck_mtx_unlock(&nmp->nm_lock);
4660 nfs_up(nmp, req->r_thread, clearjbtimeo, "resource available again");
4661 }
4662
4663 #if CONFIG_NFS4
4664 if ((nmp->nm_vers >= NFS_VER4) && (*status == NFSERR_WRONGSEC)) {
4665 /*
4666 * Hmmm... we need to try a different security flavor.
4667 * The first time a request hits this, we will allocate an array
4668 * to track flavors to try. We fill the array with the mount's
4669 * preferred flavors or the server's preferred flavors or just the
4670 * flavors we support.
4671 */
4672 uint32_t srvflavors[NX_MAX_SEC_FLAVORS];
4673 int srvcount, i, j;
4674
4675 /* Call SECINFO to try to get list of flavors from server. */
4676 srvcount = NX_MAX_SEC_FLAVORS;
4677 nfs4_secinfo_rpc(nmp, &req->r_secinfo, req->r_cred, srvflavors, &srvcount);
4678
4679 if (!req->r_wrongsec) {
4680 /* first time... set up flavor array */
4681 MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK);
4682 if (!req->r_wrongsec) {
4683 error = EACCES;
4684 goto nfsmout;
4685 }
4686 i = 0;
4687 if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */
4688 for (; i < nmp->nm_sec.count; i++) {
4689 req->r_wrongsec[i] = nmp->nm_sec.flavors[i];
4690 }
4691 } else if (srvcount) { /* otherwise use the server's list of flavors */
4692 for (; i < srvcount; i++) {
4693 req->r_wrongsec[i] = srvflavors[i];
4694 }
4695 } else { /* otherwise, just try the flavors we support. */
4696 req->r_wrongsec[i++] = RPCAUTH_KRB5P;
4697 req->r_wrongsec[i++] = RPCAUTH_KRB5I;
4698 req->r_wrongsec[i++] = RPCAUTH_KRB5;
4699 req->r_wrongsec[i++] = RPCAUTH_SYS;
4700 req->r_wrongsec[i++] = RPCAUTH_NONE;
4701 }
4702 for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */
4703 req->r_wrongsec[i] = RPCAUTH_INVALID;
4704 }
4705 }
4706
4707 /* clear the current flavor from the list */
4708 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4709 if (req->r_wrongsec[i] == req->r_auth) {
4710 req->r_wrongsec[i] = RPCAUTH_INVALID;
4711 }
4712 }
4713
4714 /* find the next flavor to try */
4715 for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) {
4716 if (req->r_wrongsec[i] != RPCAUTH_INVALID) {
4717 if (!srvcount) { /* no server list, just try it */
4718 break;
4719 }
4720 /* check that it's in the server's list */
4721 for (j = 0; j < srvcount; j++) {
4722 if (req->r_wrongsec[i] == srvflavors[j]) {
4723 break;
4724 }
4725 }
4726 if (j < srvcount) { /* found */
4727 break;
4728 }
4729 /* not found in server list */
4730 req->r_wrongsec[i] = RPCAUTH_INVALID;
4731 }
4732 }
4733 if (i == NX_MAX_SEC_FLAVORS) {
4734 /* nothing left to try! */
4735 error = EACCES;
4736 goto nfsmout;
4737 }
4738
4739 /* retry with the next auth flavor */
4740 req->r_auth = req->r_wrongsec[i];
4741 req->r_xid = 0; // get a new XID
4742 req->r_flags |= R_RESTART;
4743 req->r_start = 0;
4744 FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC);
4745 return 0;
4746 }
4747 if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) {
4748 /*
4749 * We renegotiated security for this request; so update the
4750 * default security flavor for the associated node.
4751 */
4752 if (req->r_np) {
4753 req->r_np->n_auth = req->r_auth;
4754 }
4755 }
4756 #endif /* CONFIG_NFS4 */
4757 if (*status == NFS_OK) {
4758 /*
4759 * Successful NFS request
4760 */
4761 *nmrepp = nmrep;
4762 req->r_nmrep.nmc_mhead = NULL;
4763 break;
4764 }
4765 /* Got an NFS error of some kind */
4766
4767 /*
4768 * If the File Handle was stale, invalidate the
4769 * lookup cache, just in case.
4770 */
4771 if ((*status == ESTALE) && req->r_np) {
4772 cache_purge(NFSTOV(req->r_np));
4773 /* if monitored, also send delete event */
4774 if (vnode_ismonitored(NFSTOV(req->r_np))) {
4775 nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE));
4776 }
4777 }
4778 if (nmp->nm_vers == NFS_VER2) {
4779 mbuf_freem(mrep);
4780 } else {
4781 *nmrepp = nmrep;
4782 }
4783 req->r_nmrep.nmc_mhead = NULL;
4784 error = 0;
4785 break;
4786 case RPC_PROGUNAVAIL:
4787 error = EPROGUNAVAIL;
4788 break;
4789 case RPC_PROGMISMATCH:
4790 error = ERPCMISMATCH;
4791 break;
4792 case RPC_PROCUNAVAIL:
4793 error = EPROCUNAVAIL;
4794 break;
4795 case RPC_GARBAGE:
4796 error = EBADRPC;
4797 break;
4798 case RPC_SYSTEM_ERR:
4799 default:
4800 error = EIO;
4801 break;
4802 }
4803 nfsmout:
4804 if (req->r_flags & R_JBTPRINTFMSG) {
4805 req->r_flags &= ~R_JBTPRINTFMSG;
4806 lck_mtx_lock(&nmp->nm_lock);
4807 nmp->nm_jbreqs--;
4808 clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0;
4809 lck_mtx_unlock(&nmp->nm_lock);
4810 if (clearjbtimeo) {
4811 nfs_up(nmp, req->r_thread, clearjbtimeo, NULL);
4812 }
4813 }
4814 FSDBG(273, R_XID32(req->r_xid), nmp, req,
4815 (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error);
4816 return error;
4817 }
4818
4819 /*
4820 * NFS request using a GSS/Kerberos security flavor?
4821 */
4822 int
4823 nfs_request_using_gss(struct nfsreq *req)
4824 {
4825 if (!req->r_gss_ctx) {
4826 return 0;
4827 }
4828 switch (req->r_auth) {
4829 case RPCAUTH_KRB5:
4830 case RPCAUTH_KRB5I:
4831 case RPCAUTH_KRB5P:
4832 return 1;
4833 }
4834 return 0;
4835 }
4836
4837 /*
4838 * Perform an NFS request synchronously.
4839 */
4840
4841 int
4842 nfs_request(
4843 nfsnode_t np,
4844 mount_t mp, /* used only if !np */
4845 struct nfsm_chain *nmrest,
4846 int procnum,
4847 vfs_context_t ctx,
4848 struct nfsreq_secinfo_args *si,
4849 struct nfsm_chain *nmrepp,
4850 u_int64_t *xidp,
4851 int *status)
4852 {
4853 return nfs_request2(np, mp, nmrest, procnum,
4854 vfs_context_thread(ctx), vfs_context_ucred(ctx),
4855 si, 0, nmrepp, xidp, status);
4856 }
4857
4858 int
4859 nfs_request2(
4860 nfsnode_t np,
4861 mount_t mp, /* used only if !np */
4862 struct nfsm_chain *nmrest,
4863 int procnum,
4864 thread_t thd,
4865 kauth_cred_t cred,
4866 struct nfsreq_secinfo_args *si,
4867 int flags,
4868 struct nfsm_chain *nmrepp,
4869 u_int64_t *xidp,
4870 int *status)
4871 {
4872 struct nfsreq *req;
4873 int error;
4874
4875 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4876 if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
4877 goto out_free;
4878 }
4879 req->r_flags |= (flags & (R_OPTMASK | R_SOFT));
4880 if (si) {
4881 req->r_secinfo = *si;
4882 }
4883
4884 FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0);
4885 do {
4886 req->r_error = 0;
4887 req->r_flags &= ~R_RESTART;
4888 if ((error = nfs_request_add_header(req))) {
4889 break;
4890 }
4891 if (xidp) {
4892 *xidp = req->r_xid;
4893 }
4894 if ((error = nfs_request_send(req, 1))) {
4895 break;
4896 }
4897 nfs_request_wait(req);
4898 if ((error = nfs_request_finish(req, nmrepp, status))) {
4899 break;
4900 }
4901 } while (req->r_flags & R_RESTART);
4902
4903 FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error);
4904 nfs_request_rele(req);
4905 out_free:
4906 NFS_ZFREE(nfs_req_zone, req);
4907 return error;
4908 }
4909
4910
4911 #if CONFIG_NFS_GSS
4912 /*
4913 * Set up a new null proc request to exchange GSS context tokens with the
4914 * server. Associate the context that we are setting up with the request that we
4915 * are sending.
4916 */
4917
4918 int
4919 nfs_request_gss(
4920 mount_t mp,
4921 struct nfsm_chain *nmrest,
4922 thread_t thd,
4923 kauth_cred_t cred,
4924 int flags,
4925 struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */
4926 struct nfsm_chain *nmrepp,
4927 int *status)
4928 {
4929 struct nfsreq *req;
4930 int error, wait = 1;
4931
4932 req = zalloc_flags(nfs_req_zone, Z_WAITOK);
4933 if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) {
4934 goto out_free;
4935 }
4936 req->r_flags |= (flags & R_OPTMASK);
4937
4938 if (cp == NULL) {
4939 printf("nfs_request_gss request has no context\n");
4940 nfs_request_rele(req);
4941 error = NFSERR_EAUTH;
4942 goto out_free;
4943 }
4944 nfs_gss_clnt_ctx_ref(req, cp);
4945
4946 /*
4947 * Don't wait for a reply to a context destroy advisory
4948 * to avoid hanging on a dead server.
4949 */
4950 if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
4951 wait = 0;
4952 }
4953
4954 FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0);
4955 do {
4956 req->r_error = 0;
4957 req->r_flags &= ~R_RESTART;
4958 if ((error = nfs_request_add_header(req))) {
4959 break;
4960 }
4961
4962 if ((error = nfs_request_send(req, wait))) {
4963 break;
4964 }
4965 if (!wait) {
4966 break;
4967 }
4968
4969 nfs_request_wait(req);
4970 if ((error = nfs_request_finish(req, nmrepp, status))) {
4971 break;
4972 }
4973 } while (req->r_flags & R_RESTART);
4974
4975 FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error);
4976
4977 nfs_gss_clnt_ctx_unref(req);
4978 nfs_request_rele(req);
4979 out_free:
4980 NFS_ZFREE(nfs_req_zone, req);
4981 return error;
4982 }
4983 #endif /* CONFIG_NFS_GSS */
4984
4985 /*
4986 * Create and start an asynchronous NFS request.
4987 */
4988 int
4989 nfs_request_async(
4990 nfsnode_t np,
4991 mount_t mp, /* used only if !np */
4992 struct nfsm_chain *nmrest,
4993 int procnum,
4994 thread_t thd,
4995 kauth_cred_t cred,
4996 struct nfsreq_secinfo_args *si,
4997 int flags,
4998 struct nfsreq_cbinfo *cb,
4999 struct nfsreq **reqp)
5000 {
5001 struct nfsreq *req;
5002 struct nfsmount *nmp;
5003 int error, sent;
5004
5005 error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp);
5006 req = *reqp;
5007 FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error);
5008 if (error) {
5009 return error;
5010 }
5011 req->r_flags |= (flags & R_OPTMASK);
5012 req->r_flags |= R_ASYNC;
5013 if (si) {
5014 req->r_secinfo = *si;
5015 }
5016 if (cb) {
5017 req->r_callback = *cb;
5018 }
5019 error = nfs_request_add_header(req);
5020 if (!error) {
5021 req->r_flags |= R_WAITSENT;
5022 if (req->r_callback.rcb_func) {
5023 nfs_request_ref(req, 0);
5024 }
5025 error = nfs_request_send(req, 1);
5026 lck_mtx_lock(&req->r_mtx);
5027 if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) {
5028 /* make sure to wait until this async I/O request gets sent */
5029 int slpflag = (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
5030 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
5031 while (!(req->r_flags & R_SENT)) {
5032 nmp = req->r_nmp;
5033 if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) {
5034 lck_mtx_lock(&nmp->nm_lock);
5035 if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
5036 /*
5037 * It's not going to get off the resend queue if we're in recovery.
5038 * So, just take it off ourselves. We could be holding mount state
5039 * busy and thus holding up the start of recovery.
5040 */
5041 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
5042 req->r_flags &= ~R_RESENDQ;
5043 req->r_rchain.tqe_next = NFSREQNOLIST;
5044 lck_mtx_unlock(&nmp->nm_lock);
5045 req->r_flags |= R_SENDING;
5046 lck_mtx_unlock(&req->r_mtx);
5047 error = nfs_send(req, 1);
5048 /* Remove the R_RESENDQ reference */
5049 nfs_request_rele(req);
5050 lck_mtx_lock(&req->r_mtx);
5051 if (error) {
5052 break;
5053 }
5054 continue;
5055 }
5056 lck_mtx_unlock(&nmp->nm_lock);
5057 }
5058 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5059 break;
5060 }
5061 msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts);
5062 slpflag = 0;
5063 }
5064 }
5065 sent = req->r_flags & R_SENT;
5066 lck_mtx_unlock(&req->r_mtx);
5067 if (error && req->r_callback.rcb_func && !sent) {
5068 nfs_request_rele(req);
5069 }
5070 }
5071 FSDBG(274, R_XID32(req->r_xid), np, procnum, error);
5072 if (error || req->r_callback.rcb_func) {
5073 nfs_request_rele(req);
5074 }
5075
5076 return error;
5077 }
5078
5079 /*
5080 * Wait for and finish an asynchronous NFS request.
5081 */
5082 int
5083 nfs_request_async_finish(
5084 struct nfsreq *req,
5085 struct nfsm_chain *nmrepp,
5086 u_int64_t *xidp,
5087 int *status)
5088 {
5089 int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
5090 struct nfsmount *nmp;
5091
5092 lck_mtx_lock(&req->r_mtx);
5093 if (!asyncio) {
5094 req->r_flags |= R_ASYNCWAIT;
5095 }
5096 while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */
5097 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
5098
5099 if ((nmp = req->r_nmp)) {
5100 lck_mtx_lock(&nmp->nm_lock);
5101 if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) {
5102 /*
5103 * It's not going to get off the resend queue if we're in recovery.
5104 * So, just take it off ourselves. We could be holding mount state
5105 * busy and thus holding up the start of recovery.
5106 */
5107 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
5108 req->r_flags &= ~R_RESENDQ;
5109 req->r_rchain.tqe_next = NFSREQNOLIST;
5110 /* Remove the R_RESENDQ reference */
5111 assert(req->r_refs > 0);
5112 req->r_refs--;
5113 lck_mtx_unlock(&nmp->nm_lock);
5114 break;
5115 }
5116 lck_mtx_unlock(&nmp->nm_lock);
5117 }
5118 if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
5119 break;
5120 }
5121 msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts);
5122 }
5123 lck_mtx_unlock(&req->r_mtx);
5124
5125 if (!error) {
5126 nfs_request_wait(req);
5127 error = nfs_request_finish(req, nmrepp, status);
5128 }
5129
5130 while (!error && (req->r_flags & R_RESTART)) {
5131 if (asyncio) {
5132 assert(req->r_achain.tqe_next == NFSREQNOLIST);
5133 lck_mtx_lock(&req->r_mtx);
5134 req->r_flags &= ~R_IOD;
5135 if (req->r_resendtime) { /* send later */
5136 nfs_asyncio_resend(req);
5137 lck_mtx_unlock(&req->r_mtx);
5138 return EINPROGRESS;
5139 }
5140 lck_mtx_unlock(&req->r_mtx);
5141 }
5142 req->r_error = 0;
5143 req->r_flags &= ~R_RESTART;
5144 if ((error = nfs_request_add_header(req))) {
5145 break;
5146 }
5147 if ((error = nfs_request_send(req, !asyncio))) {
5148 break;
5149 }
5150 if (asyncio) {
5151 return EINPROGRESS;
5152 }
5153 nfs_request_wait(req);
5154 if ((error = nfs_request_finish(req, nmrepp, status))) {
5155 break;
5156 }
5157 }
5158 if (xidp) {
5159 *xidp = req->r_xid;
5160 }
5161
5162 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error);
5163 nfs_request_rele(req);
5164 return error;
5165 }
5166
5167 /*
5168 * Cancel a pending asynchronous NFS request.
5169 */
5170 void
5171 nfs_request_async_cancel(struct nfsreq *req)
5172 {
5173 FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, 0xD1ED1E);
5174 nfs_request_rele(req);
5175 }
5176
5177 /*
5178 * Flag a request as being terminated.
5179 */
5180 void
5181 nfs_softterm(struct nfsreq *req)
5182 {
5183 struct nfsmount *nmp = req->r_nmp;
5184 req->r_flags |= R_SOFTTERM;
5185 req->r_error = ETIMEDOUT;
5186 if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) {
5187 return;
5188 }
5189 /* update congestion window */
5190 req->r_flags &= ~R_CWND;
5191 lck_mtx_lock(&nmp->nm_lock);
5192 FSDBG(532, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd);
5193 nmp->nm_sent -= NFS_CWNDSCALE;
5194 if ((nmp->nm_sent < nmp->nm_cwnd) && !TAILQ_EMPTY(&nmp->nm_cwndq)) {
5195 /* congestion window is open, poke the cwnd queue */
5196 struct nfsreq *req2 = TAILQ_FIRST(&nmp->nm_cwndq);
5197 TAILQ_REMOVE(&nmp->nm_cwndq, req2, r_cchain);
5198 req2->r_cchain.tqe_next = NFSREQNOLIST;
5199 wakeup(req2);
5200 }
5201 lck_mtx_unlock(&nmp->nm_lock);
5202 }
5203
5204 /*
5205 * Ensure req isn't in use by the timer, then dequeue it.
5206 */
5207 void
5208 nfs_reqdequeue(struct nfsreq *req)
5209 {
5210 lck_mtx_lock(&nfs_request_mutex);
5211 while (req->r_lflags & RL_BUSY) {
5212 req->r_lflags |= RL_WAITING;
5213 msleep(&req->r_lflags, &nfs_request_mutex, PSOCK, "reqdeq", NULL);
5214 }
5215 if (req->r_lflags & RL_QUEUED) {
5216 TAILQ_REMOVE(&nfs_reqq, req, r_chain);
5217 req->r_lflags &= ~RL_QUEUED;
5218 }
5219 lck_mtx_unlock(&nfs_request_mutex);
5220 }
5221
5222 /*
5223 * Busy (lock) a nfsreq, used by the nfs timer to make sure it's not
5224 * free()'d out from under it.
5225 */
5226 void
5227 nfs_reqbusy(struct nfsreq *req)
5228 {
5229 if (req->r_lflags & RL_BUSY) {
5230 panic("req locked");
5231 }
5232 req->r_lflags |= RL_BUSY;
5233 }
5234
5235 /*
5236 * Unbusy the nfsreq passed in, return the next nfsreq in the chain busied.
5237 */
5238 struct nfsreq *
5239 nfs_reqnext(struct nfsreq *req)
5240 {
5241 struct nfsreq * nextreq;
5242
5243 if (req == NULL) {
5244 return NULL;
5245 }
5246 /*
5247 * We need to get and busy the next req before signalling the
5248 * current one, otherwise wakeup() may block us and we'll race to
5249 * grab the next req.
5250 */
5251 nextreq = TAILQ_NEXT(req, r_chain);
5252 if (nextreq != NULL) {
5253 nfs_reqbusy(nextreq);
5254 }
5255 /* unbusy and signal. */
5256 req->r_lflags &= ~RL_BUSY;
5257 if (req->r_lflags & RL_WAITING) {
5258 req->r_lflags &= ~RL_WAITING;
5259 wakeup(&req->r_lflags);
5260 }
5261 return nextreq;
5262 }
5263
5264 /*
5265 * NFS request queue timer routine
5266 *
5267 * Scan the NFS request queue for any requests that have timed out.
5268 *
5269 * Alert the system of unresponsive servers.
5270 * Mark expired requests on soft mounts as terminated.
5271 * For UDP, mark/signal requests for retransmission.
5272 */
5273 void
5274 nfs_request_timer(__unused void *param0, __unused void *param1)
5275 {
5276 struct nfsreq *req;
5277 struct nfsmount *nmp;
5278 int timeo, maxtime, finish_asyncio, error;
5279 struct timeval now;
5280 TAILQ_HEAD(nfs_mount_pokeq, nfsmount) nfs_mount_poke_queue;
5281 TAILQ_INIT(&nfs_mount_poke_queue);
5282
5283 restart:
5284 lck_mtx_lock(&nfs_request_mutex);
5285 req = TAILQ_FIRST(&nfs_reqq);
5286 if (req == NULL) { /* no requests - turn timer off */
5287 nfs_request_timer_on = 0;
5288 lck_mtx_unlock(&nfs_request_mutex);
5289 return;
5290 }
5291
5292 nfs_reqbusy(req);
5293
5294 microuptime(&now);
5295 for (; req != NULL; req = nfs_reqnext(req)) {
5296 nmp = req->r_nmp;
5297 if (nmp == NULL) {
5298 NFS_SOCK_DBG("Found a request without a mount!\n");
5299 continue;
5300 }
5301 if (req->r_error || req->r_nmrep.nmc_mhead) {
5302 continue;
5303 }
5304 if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) {
5305 if (req->r_callback.rcb_func != NULL) {
5306 /* async I/O RPC needs to be finished */
5307 lck_mtx_lock(&req->r_mtx);
5308 req->r_error = error;
5309 finish_asyncio = !(req->r_flags & R_WAITSENT);
5310 wakeup(req);
5311 lck_mtx_unlock(&req->r_mtx);
5312 if (finish_asyncio) {
5313 nfs_asyncio_finish(req);
5314 }
5315 }
5316 continue;
5317 }
5318
5319 lck_mtx_lock(&req->r_mtx);
5320
5321 if (nmp->nm_tprintf_initial_delay &&
5322 ((req->r_rexmit > 2) || (req->r_flags & R_RESENDERR)) &&
5323 ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) {
5324 req->r_lastmsg = now.tv_sec;
5325 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5326 "not responding", 1);
5327 req->r_flags |= R_TPRINTFMSG;
5328 lck_mtx_lock(&nmp->nm_lock);
5329 if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
5330 lck_mtx_unlock(&nmp->nm_lock);
5331 /* we're not yet completely mounted and */
5332 /* we can't complete an RPC, so we fail */
5333 OSAddAtomic64(1, &nfsstats.rpctimeouts);
5334 nfs_softterm(req);
5335 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5336 wakeup(req);
5337 lck_mtx_unlock(&req->r_mtx);
5338 if (finish_asyncio) {
5339 nfs_asyncio_finish(req);
5340 }
5341 continue;
5342 }
5343 lck_mtx_unlock(&nmp->nm_lock);
5344 }
5345
5346 /*
5347 * Put a reasonable limit on the maximum timeout,
5348 * and reduce that limit when soft mounts get timeouts or are in reconnect.
5349 */
5350 if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) {
5351 maxtime = NFS_MAXTIMEO;
5352 } else if ((req->r_flags & (R_SETUP | R_RECOVER)) ||
5353 ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) {
5354 maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2;
5355 } else {
5356 maxtime = NFS_MINTIMEO / 4;
5357 }
5358
5359 /*
5360 * Check for request timeout.
5361 */
5362 if (req->r_rtt >= 0) {
5363 req->r_rtt++;
5364 lck_mtx_lock(&nmp->nm_lock);
5365 if (req->r_flags & R_RESENDERR) {
5366 /* with resend errors, retry every few seconds */
5367 timeo = 4 * hz;
5368 } else {
5369 if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) {
5370 timeo = NFS_MINIDEMTIMEO; // gss context setup
5371 } else if (NMFLAG(nmp, DUMBTIMER)) {
5372 timeo = nmp->nm_timeo;
5373 } else {
5374 timeo = NFS_RTO(nmp, proct[req->r_procnum]);
5375 }
5376
5377 /* ensure 62.5 ms floor */
5378 while (16 * timeo < hz) {
5379 timeo *= 2;
5380 }
5381 if (nmp->nm_timeouts > 0) {
5382 timeo *= nfs_backoff[nmp->nm_timeouts - 1];
5383 }
5384 }
5385 /* limit timeout to max */
5386 if (timeo > maxtime) {
5387 timeo = maxtime;
5388 }
5389 if (req->r_rtt <= timeo) {
5390 NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo);
5391 lck_mtx_unlock(&nmp->nm_lock);
5392 lck_mtx_unlock(&req->r_mtx);
5393 continue;
5394 }
5395 /* The request has timed out */
5396 NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n",
5397 req->r_procnum, proct[req->r_procnum],
5398 req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts,
5399 (now.tv_sec - req->r_start) * NFS_HZ, maxtime);
5400 if (nmp->nm_timeouts < 8) {
5401 nmp->nm_timeouts++;
5402 }
5403 if (nfs_mount_check_dead_timeout(nmp)) {
5404 /* Unbusy this request */
5405 req->r_lflags &= ~RL_BUSY;
5406 if (req->r_lflags & RL_WAITING) {
5407 req->r_lflags &= ~RL_WAITING;
5408 wakeup(&req->r_lflags);
5409 }
5410 lck_mtx_unlock(&req->r_mtx);
5411
5412 /* No need to poke this mount */
5413 if (nmp->nm_sockflags & NMSOCK_POKE) {
5414 nmp->nm_sockflags &= ~NMSOCK_POKE;
5415 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5416 }
5417 /* Release our lock state, so we can become a zombie */
5418 lck_mtx_unlock(&nfs_request_mutex);
5419
5420 /*
5421  * Note nfs_mount_make_zombie(nmp) must be
5422  * called with nm_lock held. After doing some
5423  * work we release nm_lock in
5424  * nfs_mount_make_zombie without acquiring any
5425  * other locks. (Later, in nfs_mount_zombie we
5426  * will acquire nfs_request_mutex, r_mtx, and
5427  * nm_lock, in that order.) So we should not be
5428  * introducing a deadlock here. We take a reference
5429  * on the mount so that it's still there when we
5430 * release the lock.
5431 */
5432 nmp->nm_ref++;
5433 nfs_mount_make_zombie(nmp);
5434 lck_mtx_unlock(&nmp->nm_lock);
5435 nfs_mount_rele(nmp);
5436
5437 /*
5438  * All the requests for this mount have now been
5439  * removed from the request queue. Restart to
5440  * process the remaining mounts.
5441 */
5442 goto restart;
5443 }
5444
5445 /* if it's been a few seconds, try poking the socket */
5446 if ((nmp->nm_sotype == SOCK_STREAM) &&
5447 ((now.tv_sec - req->r_start) >= 3) &&
5448 !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) &&
5449 (nmp->nm_sockflags & NMSOCK_READY)) {
5450 nmp->nm_sockflags |= NMSOCK_POKE;
5451 /*
5452 * We take a ref on the mount so that we know the mount will still be there
5453 * when we process the nfs_mount_poke_queue. An unmount request will block
5454 * in nfs_mount_drain_and_cleanup until after the poke is finished. We release
5455  * the reference after calling nfs_sock_poke below.
5456 */
5457 nmp->nm_ref++;
5458 TAILQ_INSERT_TAIL(&nfs_mount_poke_queue, nmp, nm_pokeq);
5459 }
5460 lck_mtx_unlock(&nmp->nm_lock);
5461 }
5462
5463 /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. */
5464 if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) &&
5465 ((req->r_rexmit >= req->r_retry) || /* too many */
5466 ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */
5467 OSAddAtomic64(1, &nfsstats.rpctimeouts);
5468 lck_mtx_lock(&nmp->nm_lock);
5469 if (!(nmp->nm_state & NFSSTA_TIMEO)) {
5470 lck_mtx_unlock(&nmp->nm_lock);
5471 /* make sure we note the unresponsive server */
5472 /* (maxtime may be less than tprintf delay) */
5473 nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO,
5474 "not responding", 1);
5475 req->r_lastmsg = now.tv_sec;
5476 req->r_flags |= R_TPRINTFMSG;
5477 } else {
5478 lck_mtx_unlock(&nmp->nm_lock);
5479 }
5480 if (req->r_flags & R_NOINTR) {
5481 /* don't terminate nointr requests on timeout */
5482 lck_mtx_unlock(&req->r_mtx);
5483 continue;
5484 }
5485 NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n",
5486 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt,
5487 now.tv_sec - req->r_start);
5488 nfs_softterm(req);
5489 finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT));
5490 wakeup(req);
5491 lck_mtx_unlock(&req->r_mtx);
5492 if (finish_asyncio) {
5493 nfs_asyncio_finish(req);
5494 }
5495 continue;
5496 }
5497
5498 /* for TCP, only resend if explicitly requested */
5499 if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) {
5500 if (++req->r_rexmit > NFS_MAXREXMIT) {
5501 req->r_rexmit = NFS_MAXREXMIT;
5502 }
5503 req->r_rtt = 0;
5504 lck_mtx_unlock(&req->r_mtx);
5505 continue;
5506 }
5507
5508 /*
5509 * The request needs to be (re)sent. Kick the requester to resend it.
5510 * (unless it's already marked as needing a resend)
5511 */
5512 if ((req->r_flags & R_MUSTRESEND) && (req->r_rtt == -1)) {
5513 lck_mtx_unlock(&req->r_mtx);
5514 continue;
5515 }
5516 NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n",
5517 req->r_procnum, req->r_xid, req->r_flags, req->r_rtt);
5518 req->r_flags |= R_MUSTRESEND;
5519 req->r_rtt = -1;
5520 wakeup(req);
5521 if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) {
5522 nfs_asyncio_resend(req);
5523 }
5524 lck_mtx_unlock(&req->r_mtx);
5525 }
5526
5527 lck_mtx_unlock(&nfs_request_mutex);
5528
5529 /* poke any sockets */
5530 while ((nmp = TAILQ_FIRST(&nfs_mount_poke_queue))) {
5531 TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq);
5532 nfs_sock_poke(nmp);
5533 nfs_mount_rele(nmp);
5534 }
5535
5536 nfs_interval_timer_start(nfs_request_timer_call, NFS_REQUESTDELAY);
5537 }
5538
5539 /*
5540  * Check a thread's proc for the "noremotehang" flag.
5541 */
5542 int
5543 nfs_noremotehang(thread_t thd)
5544 {
5545 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
5546 return p && proc_noremotehang(p);
5547 }
5548
5549 /*
5550 * Test for a termination condition pending on the process.
5551 * This is used to determine if we need to bail on a mount.
5552 * ETIMEDOUT is returned if there has been a soft timeout.
5553  * EINTR is returned if there is a signal pending that is not being ignored
5554  * and the mount is interruptible, or if we are a thread that is in the
5555  * process of cancellation (also SIGKILL posted).
5556  * ESHUTDOWN is returned if the system is in shutdown.
5557 */
5558 extern int sigprop[NSIG + 1];
5559 int
5560 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
5561 {
5562 proc_t p;
5563 int error = 0;
5564
5565 if (!nmp) {
5566 return ENXIO;
5567 }
5568
5569 if (get_system_inshutdown()) {
5570 NFS_SOCK_DBG("Shutdown in progress\n");
5571 return ESHUTDOWN;
5572 }
5573
5574 if (req && (req->r_flags & R_SOFTTERM)) {
5575 return ETIMEDOUT; /* request has been terminated. */
5576 }
5577 if (req && (req->r_flags & R_NOINTR)) {
5578 thd = NULL; /* don't check for signal on R_NOINTR */
5579 }
5580 if (!nmplocked) {
5581 lck_mtx_lock(&nmp->nm_lock);
5582 }
5583 if (nmp->nm_state & NFSSTA_FORCE) {
5584 /* If a force unmount is in progress then fail. */
5585 error = EIO;
5586 } else if (vfs_isforce(nmp->nm_mountp)) {
5587 /* Someone is unmounting us, go soft and mark it. */
5588 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
5589 nmp->nm_state |= NFSSTA_FORCE;
5590 }
5591
5592 /* Check if the mount is marked dead. */
5593 if (!error && (nmp->nm_state & NFSSTA_DEAD)) {
5594 error = ENXIO;
5595 }
5596
5597 /*
5598 * If the mount is hung and we've requested not to hang
5599 * on remote filesystems, then bail now.
5600 */
5601 if (current_proc() != kernproc &&
5602 !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) {
5603 error = EIO;
5604 }
5605
5606 if (!nmplocked) {
5607 lck_mtx_unlock(&nmp->nm_lock);
5608 }
5609 if (error) {
5610 return error;
5611 }
5612
5613 /* may not have a thread for async I/O */
5614 if (thd == NULL || current_proc() == kernproc) {
5615 return 0;
5616 }
5617
5618 /*
5619 * Check if the process is aborted, but don't interrupt if we
5620 * were killed by a signal and this is the exiting thread which
5621 * is attempting to dump core.
5622 */
5623 if (((p = current_proc()) != kernproc) && current_thread_aborted() &&
5624 (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) ||
5625 (p->p_sigacts == NULL) ||
5626 (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) ||
5627 !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) {
5628 return EINTR;
5629 }
5630
5631 /* mask off thread and process blocked signals. */
5632 if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) &&
5633 proc_pendingsignals(p, NFSINT_SIGMASK)) {
5634 return EINTR;
5635 }
5636 return 0;
5637 }
5638
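/*
 * Illustrative sketch (not part of the build): a typical wait loop in
 * this file polls nfs_sigintr() on each pass so that soft timeouts,
 * pending signals, forced unmounts, and shutdown all break the wait,
 * as nfs_sndlock() below does:
 *
 *	while (*statep & NFSSTA_SNDLOCK) {
 *		if ((error = nfs_sigintr(nmp, req, req->r_thread, 1)))
 *			break;
 *		msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
 *	}
 */
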
5639 /*
5640 * Lock a socket against others.
5641 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
5642 * and also to avoid race conditions between the processes with nfs requests
5643 * in progress when a reconnect is necessary.
5644 */
5645 int
5646 nfs_sndlock(struct nfsreq *req)
5647 {
5648 struct nfsmount *nmp = req->r_nmp;
5649 int *statep;
5650 int error = 0, slpflag = 0;
5651 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
5652
5653 if (nfs_mount_gone(nmp)) {
5654 return ENXIO;
5655 }
5656
5657 lck_mtx_lock(&nmp->nm_lock);
5658 statep = &nmp->nm_state;
5659
5660 if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) {
5661 slpflag = PCATCH;
5662 }
5663 while (*statep & NFSSTA_SNDLOCK) {
5664 if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
5665 break;
5666 }
5667 *statep |= NFSSTA_WANTSND;
5668 if (nfs_noremotehang(req->r_thread)) {
5669 ts.tv_sec = 1;
5670 }
5671 msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts);
5672 if (slpflag == PCATCH) {
5673 slpflag = 0;
5674 ts.tv_sec = 2;
5675 }
5676 }
5677 if (!error) {
5678 *statep |= NFSSTA_SNDLOCK;
5679 }
5680 lck_mtx_unlock(&nmp->nm_lock);
5681 return error;
5682 }
5683
5684 /*
5685 * Unlock the stream socket for others.
5686 */
5687 void
5688 nfs_sndunlock(struct nfsreq *req)
5689 {
5690 struct nfsmount *nmp = req->r_nmp;
5691 int *statep, wake = 0;
5692
5693 if (!nmp) {
5694 return;
5695 }
5696 lck_mtx_lock(&nmp->nm_lock);
5697 statep = &nmp->nm_state;
5698 if ((*statep & NFSSTA_SNDLOCK) == 0) {
5699 panic("nfs sndunlock");
5700 }
5701 *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING);
5702 if (*statep & NFSSTA_WANTSND) {
5703 *statep &= ~NFSSTA_WANTSND;
5704 wake = 1;
5705 }
5706 lck_mtx_unlock(&nmp->nm_lock);
5707 if (wake) {
5708 wakeup(statep);
5709 }
5710 }
5711
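/*
 * Illustrative sketch (not part of the build): the send lock brackets
 * the write of one complete RPC record on the shared socket, so a
 * sender's use of the pair looks like:
 *
 *	if ((error = nfs_sndlock(req)))
 *		return error;
 *	... write the full RPC record to the mount's socket ...
 *	nfs_sndunlock(req);
 */
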
5712 int
5713 nfs_aux_request(
5714 struct nfsmount *nmp,
5715 thread_t thd,
5716 struct sockaddr *saddr,
5717 socket_t so,
5718 int sotype,
5719 mbuf_t mreq,
5720 uint32_t xid,
5721 int bindresv,
5722 int timeo,
5723 struct nfsm_chain *nmrep)
5724 {
5725 int error = 0, on = 1, try, sendat = 2, soproto, recv, optlen, restoreto = 0;
5726 socket_t newso = NULL;
5727 struct sockaddr_storage ss;
5728 struct timeval orig_rcvto, orig_sndto, tv = { .tv_sec = 1, .tv_usec = 0 };
5729 mbuf_t m, mrep = NULL;
5730 struct msghdr msg;
5731 uint32_t rxid = 0, reply = 0, reply_status, rejected_status;
5732 uint32_t verf_type, verf_len, accepted_status;
5733 size_t readlen, sentlen;
5734 struct nfs_rpc_record_state nrrs;
5735
5736 if (!so) {
5737 /* create socket and set options */
5738 if (saddr->sa_family == AF_LOCAL) {
5739 soproto = 0;
5740 } else {
5741 soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP;
5742 }
5743 if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) {
5744 goto nfsmout;
5745 }
5746
5747 if (bindresv && saddr->sa_family != AF_LOCAL) {
5748 int level = (saddr->sa_family == AF_INET) ? IPPROTO_IP : IPPROTO_IPV6;
5749 int optname = (saddr->sa_family == AF_INET) ? IP_PORTRANGE : IPV6_PORTRANGE;
5750 int portrange = IP_PORTRANGE_LOW;
5751 error = sock_setsockopt(newso, level, optname, &portrange, sizeof(portrange));
5752 nfsmout_if(error);
5753 ss.ss_len = saddr->sa_len;
5754 ss.ss_family = saddr->sa_family;
5755 if (ss.ss_family == AF_INET) {
5756 ((struct sockaddr_in*)&ss)->sin_addr.s_addr = INADDR_ANY;
5757 ((struct sockaddr_in*)&ss)->sin_port = htons(0);
5758 } else if (ss.ss_family == AF_INET6) {
5759 ((struct sockaddr_in6*)&ss)->sin6_addr = in6addr_any;
5760 ((struct sockaddr_in6*)&ss)->sin6_port = htons(0);
5761 } else {
5762 error = EINVAL;
5763 }
5764 if (!error) {
5765 error = sock_bind(newso, (struct sockaddr *)&ss);
5766 }
5767 nfsmout_if(error);
5768 }
5769
5770 if (sotype == SOCK_STREAM) {
5771 #define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */
5772 int count = 0;
5773
5774 error = sock_connect(newso, saddr, MSG_DONTWAIT);
5775 if (error == EINPROGRESS) {
5776 error = 0;
5777 }
5778 nfsmout_if(error);
5779
5780 while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) {
5781 /* After NFS_AUX_CONNECTION_TIMEOUT bail */
5782 if (++count >= NFS_AUX_CONNECTION_TIMEOUT) {
5783 error = ETIMEDOUT;
5784 break;
5785 }
5786 }
5787 nfsmout_if(error);
5788 }
5789 if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) ||
5790 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) ||
5791 ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) {
5792 goto nfsmout;
5793 }
5794 so = newso;
5795 } else {
5796 /* make sure socket is using a one second timeout in this function */
5797 optlen = sizeof(orig_rcvto);
5798 error = sock_getsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, &optlen);
5799 if (!error) {
5800 optlen = sizeof(orig_sndto);
5801 error = sock_getsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, &optlen);
5802 }
5803 if (!error) {
5804 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
5805 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
5806 restoreto = 1;
5807 }
5808 }
5809
5810 if (sotype == SOCK_STREAM) {
5811 sendat = 0; /* we only resend the request for UDP */
5812 nfs_rpc_record_state_init(&nrrs);
5813 }
5814
5815 for (try = 0; try < timeo; try++) {
5816 if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) {
5817 break;
5818 }
5819 if (!try || (try == sendat)) {
5820 /* send the request (resending periodically for UDP) */
5821 if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) {
5822 goto nfsmout;
5823 }
5824 bzero(&msg, sizeof(msg));
5825 if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) {
5826 msg.msg_name = saddr;
5827 msg.msg_namelen = saddr->sa_len;
5828 }
5829 if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) {
5830 goto nfsmout;
5831 }
5832 sendat *= 2;
5833 if (sendat > 30) {
5834 sendat = 30;
5835 }
5836 }
5837 /* wait for the response */
5838 if (sotype == SOCK_STREAM) {
5839 /* try to read (more of) record */
5840 error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep);
5841 /* if we don't have the whole record yet, we'll keep trying */
5842 } else {
5843 readlen = 1 << 18;
5844 bzero(&msg, sizeof(msg));
5845 error = sock_receivembuf(so, &msg, &mrep, 0, &readlen);
5846 }
5847 if (error == EWOULDBLOCK) {
5848 continue;
5849 }
5850 nfsmout_if(error);
5851 /* parse the response */
5852 nfsm_chain_dissect_init(error, nmrep, mrep);
5853 nfsm_chain_get_32(error, nmrep, rxid);
5854 nfsm_chain_get_32(error, nmrep, reply);
5855 nfsmout_if(error);
5856 if ((rxid != xid) || (reply != RPC_REPLY)) {
5857 error = EBADRPC;
5858 }
5859 nfsm_chain_get_32(error, nmrep, reply_status);
5860 nfsmout_if(error);
5861 if (reply_status == RPC_MSGDENIED) {
5862 nfsm_chain_get_32(error, nmrep, rejected_status);
5863 nfsmout_if(error);
5864 error = (rejected_status == RPC_MISMATCH) ? ERPCMISMATCH : EACCES;
5865 goto nfsmout;
5866 }
5867 nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */
5868 nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */
5869 nfsmout_if(error);
5870 if (verf_len) {
5871 nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len));
5872 }
5873 nfsm_chain_get_32(error, nmrep, accepted_status);
5874 nfsmout_if(error);
5875 switch (accepted_status) {
5876 case RPC_SUCCESS:
5877 error = 0;
5878 break;
5879 case RPC_PROGUNAVAIL:
5880 error = EPROGUNAVAIL;
5881 break;
5882 case RPC_PROGMISMATCH:
5883 error = EPROGMISMATCH;
5884 break;
5885 case RPC_PROCUNAVAIL:
5886 error = EPROCUNAVAIL;
5887 break;
5888 case RPC_GARBAGE:
5889 error = EBADRPC;
5890 break;
5891 case RPC_SYSTEM_ERR:
5892 default:
5893 error = EIO;
5894 break;
5895 }
5896 break;
5897 }
5898 nfsmout:
5899 if (restoreto) {
5900 sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &orig_rcvto, sizeof(orig_rcvto));
5901 sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &orig_sndto, sizeof(orig_sndto));
5902 }
5903 if (newso) {
5904 sock_shutdown(newso, SHUT_RDWR);
5905 sock_close(newso);
5906 }
5907 mbuf_freem(mreq);
5908 return error;
5909 }
5910
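/*
 * Illustrative sketch (not part of the build): a one-shot RPC such as
 * the portmap query below is issued by building the call with
 * nfsm_rpchead2() and handing the mbuf chain to nfs_aux_request(),
 * which consumes (frees) mreq in all cases. Locals (cred, thd, saddr,
 * args, timeo, nmrep) are as in nfs_portmap_lookup below:
 *
 *	error = nfsm_rpchead2(nmp, SOCK_DGRAM, PMAPPROG, PMAPVERS,
 *	    PMAPPROC_GETPORT, RPCAUTH_SYS, cred, NULL, args, &xid, &mreq);
 *	if (!error)
 *		error = nfs_aux_request(nmp, thd, saddr, NULL, SOCK_DGRAM,
 *		    mreq, R_XID32(xid), 0, timeo, &nmrep);
 */
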
5911 int
5912 nfs_portmap_lookup(
5913 struct nfsmount *nmp,
5914 vfs_context_t ctx,
5915 struct sockaddr *sa,
5916 socket_t so,
5917 uint32_t protocol,
5918 uint32_t vers,
5919 uint32_t stype,
5920 int timeo)
5921 {
5922 thread_t thd = vfs_context_thread(ctx);
5923 kauth_cred_t cred = vfs_context_ucred(ctx);
5924 struct sockaddr_storage ss;
5925 struct sockaddr *saddr = (struct sockaddr*)&ss;
5926 static struct sockaddr_un rpcbind_cots = {
5927 sizeof(struct sockaddr_un),
5928 AF_LOCAL,
5929 RPCB_TICOTSORD_PATH
5930 };
5931 static struct sockaddr_un rpcbind_clts = {
5932 sizeof(struct sockaddr_un),
5933 AF_LOCAL,
5934 RPCB_TICLTS_PATH
5935 };
5936 struct nfsm_chain nmreq, nmrep;
5937 mbuf_t mreq;
5938 int error = 0, ip, pmprog, pmvers, pmproc;
5939 uint32_t ualen = 0, scopeid = 0, port32;
5940 uint64_t xid = 0;
5941 char uaddr[MAX_IPv6_STR_LEN + 16];
5942
5943 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
5944 if (saddr->sa_family == AF_INET) {
5945 ip = 4;
5946 pmprog = PMAPPROG;
5947 pmvers = PMAPVERS;
5948 pmproc = PMAPPROC_GETPORT;
5949 } else if (saddr->sa_family == AF_INET6) {
5950 ip = 6;
5951 pmprog = RPCBPROG;
5952 pmvers = RPCBVERS4;
5953 pmproc = RPCBPROC_GETVERSADDR;
5954 } else if (saddr->sa_family == AF_LOCAL) {
5955 ip = 0;
5956 pmprog = RPCBPROG;
5957 pmvers = RPCBVERS4;
5958 pmproc = RPCBPROC_GETVERSADDR;
5959 NFS_SOCK_DBG("%s\n", ((struct sockaddr_un*)sa)->sun_path);
5960 saddr = (struct sockaddr*)((stype == SOCK_STREAM) ? &rpcbind_cots : &rpcbind_clts);
5961 } else {
5962 return EINVAL;
5963 }
5964 nfsm_chain_null(&nmreq);
5965 nfsm_chain_null(&nmrep);
5966
5967 tryagain:
5968 /* send portmapper request to get port/uaddr */
5969 if (ip == 4) {
5970 ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT);
5971 } else if (ip == 6) {
5972 ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT);
5973 }
5974 nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED);
5975 nfsm_chain_add_32(error, &nmreq, protocol);
5976 nfsm_chain_add_32(error, &nmreq, vers);
5977 if (ip == 4) {
5978 nfsm_chain_add_32(error, &nmreq, stype == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP);
5979 nfsm_chain_add_32(error, &nmreq, 0);
5980 } else {
5981 if (stype == SOCK_STREAM) {
5982 if (ip == 6) {
5983 nfsm_chain_add_string(error, &nmreq, "tcp6", 4);
5984 } else {
5985 nfsm_chain_add_string(error, &nmreq, "ticotsord", 9);
5986 }
5987 } else {
5988 if (ip == 6) {
5989 nfsm_chain_add_string(error, &nmreq, "udp6", 4);
5990 } else {
5991 nfsm_chain_add_string(error, &nmreq, "ticlts", 6);
5992 }
5993 }
5994 nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */
5995 nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */
5996 }
5997 nfsm_chain_build_done(error, &nmreq);
5998 nfsmout_if(error);
5999 error = nfsm_rpchead2(nmp, stype, pmprog, pmvers, pmproc,
6000 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
6001 nfsmout_if(error);
6002 nmreq.nmc_mhead = NULL;
6003
6004 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup request", mreq);
6005 error = nfs_aux_request(nmp, thd, saddr, so,
6006 stype, mreq, R_XID32(xid), 0, timeo, &nmrep);
6007 NFS_SOCK_DUMP_MBUF("nfs_portmap_lookup reply", nmrep.nmc_mhead);
6008 NFS_SOCK_DBG("rpcbind request returned %d for program %u vers %u: %s\n", error, protocol, vers,
6009 (saddr->sa_family == AF_LOCAL) ? ((struct sockaddr_un *)saddr)->sun_path :
6010 (saddr->sa_family == AF_INET6) ? "INET6 socket" : "INET socket");
6011
6012 /* grab port from portmap response */
6013 if (ip == 4) {
6014 nfsm_chain_get_32(error, &nmrep, port32);
6015 if (!error) {
6016 if (NFS_PORT_INVALID(port32)) {
6017 error = EBADRPC;
6018 } else {
6019 ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32);
6020 }
6021 }
6022 } else {
6023 /* get uaddr string and convert to sockaddr */
6024 nfsm_chain_get_32(error, &nmrep, ualen);
6025 if (!error) {
6026 if (ualen > (sizeof(uaddr) - 1)) {
6027 error = EIO;
6028 }
6029 if (ualen < 1) {
6030 /* program is not available, just return a zero port */
6031 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6032 if (ip == 6) {
6033 ((struct sockaddr_in6*)saddr)->sin6_port = htons(0);
6034 } else {
6035 ((struct sockaddr_un*)saddr)->sun_path[0] = '\0';
6036 }
6037 NFS_SOCK_DBG("Program %u version %u unavailable\n", protocol, vers);
6038 } else {
6039 nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr);
6040 NFS_SOCK_DBG("Got uaddr %s\n", uaddr);
6041 if (!error) {
6042 uaddr[ualen] = '\0';
6043 if (ip == 6) {
6044 scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id;
6045 }
6046 if (!nfs_uaddr2sockaddr(uaddr, saddr)) {
6047 error = EIO;
6048 }
6049 if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) {
6050 NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid);
6051 ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid;
6052 }
6053 }
6054 }
6055 }
6056 if ((error == EPROGMISMATCH) || (error == EPROCUNAVAIL) || (error == EIO) || (error == EBADRPC)) {
6057 /* remote doesn't support rpcbind version or proc (or we couldn't parse uaddr) */
6058 if (pmvers == RPCBVERS4) {
6059 /* fall back to v3 and GETADDR */
6060 pmvers = RPCBVERS3;
6061 pmproc = RPCBPROC_GETADDR;
6062 nfsm_chain_cleanup(&nmreq);
6063 nfsm_chain_cleanup(&nmrep);
6064 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
6065 xid = 0;
6066 error = 0;
6067 goto tryagain;
6068 }
6069 }
6070 if (!error) {
6071 bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len));
6072 }
6073 }
6074 nfsmout:
6075 nfsm_chain_cleanup(&nmreq);
6076 nfsm_chain_cleanup(&nmrep);
6077 NFS_SOCK_DBG("Returned %d\n", error);
6078
6079 return error;
6080 }
6081
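/*
 * For reference: the rpcbind "universal addresses" handled above append
 * the port to the address as two dot-separated octets (RFC 5665), so
 * "127.0.0.1.8.1" and "::1.8.1" both mean port (8 << 8) | 1 == 2049;
 * nfs_uaddr2sockaddr() parses that form.
 */
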
6082 int
6083 nfs_msg(thread_t thd,
6084 const char *server,
6085 const char *msg,
6086 int error)
6087 {
6088 proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL;
6089 tpr_t tpr;
6090
6091 if (p) {
6092 tpr = tprintf_open(p);
6093 } else {
6094 tpr = NULL;
6095 }
6096 if (error) {
6097 tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error);
6098 } else {
6099 tprintf(tpr, "nfs server %s: %s\n", server, msg);
6100 }
6101 tprintf_close(tpr);
6102 return 0;
6103 }
6104
6105 #define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */
6106 #define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if they are automounted */
6107 #define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */
6108 #define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */
6109 #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */
6110
6111 uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK;
6112 uint32_t nfs_tcp_sockbuf = 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */
6113 int32_t nfs_is_mobile;
6114
6115 #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead timeout for squishy mounts */
6116 #define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead timeout when the NFS_SQUISH_QUICK bit is set in nfs_squishy_flags */
6117
6118 /*
6119 * Could this mount be squished?
6120 */
6121 int
6122 nfs_can_squish(struct nfsmount *nmp)
6123 {
6124 uint64_t flags = vfs_flags(nmp->nm_mountp);
6125 int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) && NMFLAG(nmp, SOFT));
6126
6127 if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) {
6128 return 0;
6129 }
6130
6131 if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) {
6132 return 0;
6133 }
6134
6135 return 1;
6136 }
6137
6138 /*
6139 * NFS mounts default to "rw,hard" - but frequently on mobile clients
6140 * the mount may become "not responding". It's desirable to be able
6141 * to unmount these dead mounts, but only if there is no risk of
6142 * losing data or crashing applications. A "squishy" NFS mount is one
6143 * that can be force unmounted with little risk of harm.
6144 *
6145  * nfs_is_squishy checks if a mount is in a squishy state. A mount is
6146  * in a squishy state iff it is allowed to be squishy and has no dirty
6147  * pages, no mmapped files, and no files open for write. Whether mounts
6148  * are allowed to be squishy is controlled by the nfs_squishy_flags
6149  * settings and the machine's mobility state; these flags can be set
6150  * via sysctl.
6151 *
6152  * If nfs_is_squishy determines that we are in a squishy state, we set
6153  * the current dead timeout to the larger of NFS_SQUISHY_DEADTIMEOUT
6154  * (or NFS_SQUISHY_QUICKTIMEOUT if NFS_SQUISH_QUICK is set; see above)
6155  * and 1/8th of the mount's nm_deadtimeout value. Otherwise we just
6156  * set the current dead timeout to the nm_deadtimeout value
6157  * established at mount time.
6158 *
6159 * Assumes that nm_lock is held.
6160 *
6161  * Note this routine is racy, but its effect on the dead timeout
6162  * only matters when we're in trouble and are likely to stay that
6163  * way. Since by default it applies only to automounted volumes on
6164  * mobile machines, this is a reasonable trade-off between data
6165  * integrity and user experience. It can be disabled or tuned via
6166  * the nfs.conf file.
6167 */
6168
6169 int
6170 nfs_is_squishy(struct nfsmount *nmp)
6171 {
6172 mount_t mp = nmp->nm_mountp;
6173 int squishy = 0;
6174 int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT;
6175
6176 NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n",
6177 vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile);
6178
6179 if (!nfs_can_squish(nmp)) {
6180 goto out;
6181 }
6182
6183 timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo;
6184 NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo);
6185
6186 if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) {
6187 uint64_t flags = mp ? vfs_flags(mp) : 0;
6188 squishy = 1;
6189
6190 /*
6191  * Walk the nfs nodes and check for dirty buffers if we're not
6192  * RDONLY and we've not already been declared squishy, since
6193  * this can be a bit expensive.
6194 */
6195 if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) {
6196 squishy = !nfs_mount_is_dirty(mp);
6197 }
6198 }
6199
6200 out:
6201 if (squishy) {
6202 nmp->nm_state |= NFSSTA_SQUISHY;
6203 } else {
6204 nmp->nm_state &= ~NFSSTA_SQUISHY;
6205 }
6206
6207 nmp->nm_curdeadtimeout = squishy ? timeo : nmp->nm_deadtimeout;
6208
6209 NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout);
6210
6211 return squishy;
6212 }
6213
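/*
 * Worked example of the timeout math above: with the default
 * NFS_SQUISH_QUICK flag set, the base timeout is
 * NFS_SQUISHY_QUICKTIMEOUT (4s), so a mount with nm_deadtimeout = 60
 * gets nm_curdeadtimeout = max(60 / 8, 4) = 7 seconds while squishy,
 * and reverts to the full 60 seconds once it no longer is.
 */
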
6214 /*
6215 * On a send operation, if we can't reach the server and we've got only one server to talk to
6216  * and the NFS_SQUISH_QUICK flag is set and we are in a squishy state, then mark the mount as dead
6217 * and ask to be forcibly unmounted. Return 1 if we're dead and 0 otherwise.
6218 */
6219 int
6220 nfs_is_dead(int error, struct nfsmount *nmp)
6221 {
6222 fsid_t fsid;
6223
6224 lck_mtx_lock(&nmp->nm_lock);
6225 if (nmp->nm_state & NFSSTA_DEAD) {
6226 lck_mtx_unlock(&nmp->nm_lock);
6227 return 1;
6228 }
6229
6230 if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) ||
6231 !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) {
6232 lck_mtx_unlock(&nmp->nm_lock);
6233 return 0;
6234 }
6235
6236 if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) {
6237 printf("nfs_is_dead: nfs server %s: unreachable. Squished dead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
6238 fsid = vfs_statfs(nmp->nm_mountp)->f_fsid;
6239 lck_mtx_unlock(&nmp->nm_lock);
6240 nfs_mount_zombie(nmp, NFSSTA_DEAD);
6241 vfs_event_signal(&fsid, VQ_DEAD, 0);
6242 return 1;
6243 }
6244 lck_mtx_unlock(&nmp->nm_lock);
6245 return 0;
6246 }
6247
6248 /*
6249 * If we've experienced timeouts and we're not really a
6250 * classic hard mount, then just return cached data to
6251 * the caller instead of likely hanging on an RPC.
6252 */
6253 int
6254 nfs_use_cache(struct nfsmount *nmp)
6255 {
6256 /*
6257  *%%% We always let mobile users go to the cache;
6258 * perhaps we should not even require them to have
6259 * a timeout?
6260 */
6261 int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) ||
6262 nfs_can_squish(nmp) || nmp->nm_deadtimeout);
6263
6264 int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6265
6266 /*
6267  * So if we have a timeout and we're not really a classic hard mount,
6268  * return 1 so the caller gets things out of the cache.
6269 */
6270
6271 return (nmp->nm_state & timeoutmask) && cache_ok;
6272 }
6273
6274 /*
6275  * Log a message that the nfs or lockd server is unresponsive. If we
6276  * can be squished, or our dead timeout has expired and we're not
6277  * holding state, mark the mount as dead, remove our mount state, and
6278  * ask to be unmounted. If we are holding state, we're being called
6279  * from the nfs_request_timer and will soon detect that we need to
6280  * unmount.
6281 */
6282 void
6283 nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg, int holding_state)
6284 {
6285 int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
6286 uint32_t do_vfs_signal = 0;
6287 struct timeval now;
6288
6289 if (nfs_mount_gone(nmp)) {
6290 return;
6291 }
6292
6293 lck_mtx_lock(&nmp->nm_lock);
6294
6295 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6296 if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
6297 timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
6298 }
6299 wasunresponsive = (nmp->nm_state & timeoutmask);
6300
6301 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6302 softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
6303
6304 if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
6305 nmp->nm_state |= NFSSTA_TIMEO;
6306 }
6307 if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
6308 nmp->nm_state |= NFSSTA_LOCKTIMEO;
6309 }
6310 if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
6311 nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
6312 }
6313
6314 unresponsive = (nmp->nm_state & timeoutmask);
6315
6316 nfs_is_squishy(nmp);
6317
6318 if (unresponsive && (nmp->nm_curdeadtimeout > 0)) {
6319 microuptime(&now);
6320 if (!wasunresponsive) {
6321 nmp->nm_deadto_start = now.tv_sec;
6322 nfs_mount_sock_thread_wake(nmp);
6323 } else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) {
6324 if (!(nmp->nm_state & NFSSTA_DEAD)) {
6325 printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname,
6326 (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : "");
6327 }
6328 do_vfs_signal = VQ_DEAD;
6329 }
6330 }
6331 lck_mtx_unlock(&nmp->nm_lock);
6332
6333 if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) {
6334 nfs_mount_zombie(nmp, NFSSTA_DEAD);
6335 } else if (softnobrowse || wasunresponsive || !unresponsive) {
6336 do_vfs_signal = 0;
6337 } else {
6338 do_vfs_signal = VQ_NOTRESP;
6339 }
6340 if (do_vfs_signal) {
6341 vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0);
6342 }
6343
6344 nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
6345 }
6346
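/*
 * Illustrative sketch (not part of the build): nfs_down()/nfs_up() are
 * used as a matched pair around an unresponsive episode; the message
 * strings here are only examples:
 *
 *	nfs_down(nmp, thd, 0, NFSSTA_TIMEO, "not responding", 0);
 *	... the server answers again ...
 *	nfs_up(nmp, thd, NFSSTA_TIMEO, "is alive again");
 */
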
6347 void
6348 nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
6349 {
6350 int timeoutmask, wasunresponsive, unresponsive, softnobrowse;
6351 int do_vfs_signal;
6352
6353 if (nfs_mount_gone(nmp)) {
6354 return;
6355 }
6356
6357 if (msg) {
6358 nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0);
6359 }
6360
6361 lck_mtx_lock(&nmp->nm_lock);
6362
6363 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
6364 if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */
6365 timeoutmask &= ~NFSSTA_JUKEBOXTIMEO;
6366 }
6367 wasunresponsive = (nmp->nm_state & timeoutmask);
6368
6369 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
6370 softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
6371
6372 if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
6373 nmp->nm_state &= ~NFSSTA_TIMEO;
6374 }
6375 if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
6376 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
6377 }
6378 if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) {
6379 nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO;
6380 }
6381
6382 unresponsive = (nmp->nm_state & timeoutmask);
6383
6384 nmp->nm_deadto_start = 0;
6385 nmp->nm_curdeadtimeout = nmp->nm_deadtimeout;
6386 nmp->nm_state &= ~NFSSTA_SQUISHY;
6387 lck_mtx_unlock(&nmp->nm_lock);
6388
6389 if (softnobrowse) {
6390 do_vfs_signal = 0;
6391 } else {
6392 do_vfs_signal = (wasunresponsive && !unresponsive);
6393 }
6394 if (do_vfs_signal) {
6395 vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
6396 }
6397 }
6398
6399
6400 #endif /* CONFIG_NFS_CLIENT */
6401
6402 #if CONFIG_NFS_SERVER
6403
6404 /*
6405 * Generate the rpc reply header
6406 * siz arg. is used to decide if adding a cluster is worthwhile
6407  * The siz arg is used to decide if adding a cluster is worthwhile.
6408 int
6409 nfsrv_rephead(
6410 struct nfsrv_descript *nd,
6411 __unused struct nfsrv_sock *slp,
6412 struct nfsm_chain *nmrepp,
6413 size_t siz)
6414 {
6415 mbuf_t mrep;
6416 u_int32_t *tl;
6417 struct nfsm_chain nmrep;
6418 int err, error;
6419
6420 err = nd->nd_repstat;
6421 if (err && (nd->nd_vers == NFS_VER2)) {
6422 siz = 0;
6423 }
6424
6425 /*
6426  * If this is a big reply, use a cluster; else
6427  * try to leave leading space for the lower level headers.
6428 */
6429 siz += RPC_REPLYSIZ;
6430 if (siz >= nfs_mbuf_minclsize) {
6431 error = mbuf_getpacket(MBUF_WAITOK, &mrep);
6432 } else {
6433 error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mrep);
6434 }
6435 if (error) {
6436 /* unable to allocate packet */
6437 /* XXX should we keep statistics for these errors? */
6438 return error;
6439 }
6440 if (siz < nfs_mbuf_minclsize) {
6441 /* leave space for lower level headers */
6442 tl = mbuf_data(mrep);
6443 tl += 80 / sizeof(*tl); /* XXX max_hdr? XXX */
6444 mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED);
6445 }
6446 nfsm_chain_init(&nmrep, mrep);
6447 nfsm_chain_add_32(error, &nmrep, nd->nd_retxid);
6448 nfsm_chain_add_32(error, &nmrep, RPC_REPLY);
6449 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
6450 nfsm_chain_add_32(error, &nmrep, RPC_MSGDENIED);
6451 if (err & NFSERR_AUTHERR) {
6452 nfsm_chain_add_32(error, &nmrep, RPC_AUTHERR);
6453 nfsm_chain_add_32(error, &nmrep, (err & ~NFSERR_AUTHERR));
6454 } else {
6455 nfsm_chain_add_32(error, &nmrep, RPC_MISMATCH);
6456 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
6457 nfsm_chain_add_32(error, &nmrep, RPC_VER2);
6458 }
6459 } else {
6460 /* reply status */
6461 nfsm_chain_add_32(error, &nmrep, RPC_MSGACCEPTED);
6462 if (nd->nd_gss_context != NULL) {
6463 /* RPCSEC_GSS verifier */
6464 error = nfs_gss_svc_verf_put(nd, &nmrep);
6465 if (error) {
6466 nfsm_chain_add_32(error, &nmrep, RPC_SYSTEM_ERR);
6467 goto done;
6468 }
6469 } else {
6470 /* RPCAUTH_NULL verifier */
6471 nfsm_chain_add_32(error, &nmrep, RPCAUTH_NULL);
6472 nfsm_chain_add_32(error, &nmrep, 0);
6473 }
6474 /* accepted status */
6475 switch (err) {
6476 case EPROGUNAVAIL:
6477 nfsm_chain_add_32(error, &nmrep, RPC_PROGUNAVAIL);
6478 break;
6479 case EPROGMISMATCH:
6480 nfsm_chain_add_32(error, &nmrep, RPC_PROGMISMATCH);
6481 /* XXX hard coded versions? */
6482 nfsm_chain_add_32(error, &nmrep, NFS_VER2);
6483 nfsm_chain_add_32(error, &nmrep, NFS_VER3);
6484 break;
6485 case EPROCUNAVAIL:
6486 nfsm_chain_add_32(error, &nmrep, RPC_PROCUNAVAIL);
6487 break;
6488 case EBADRPC:
6489 nfsm_chain_add_32(error, &nmrep, RPC_GARBAGE);
6490 break;
6491 default:
6492 nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS);
6493 if (nd->nd_gss_context != NULL) {
6494 error = nfs_gss_svc_prepare_reply(nd, &nmrep);
6495 }
6496 if (err != NFSERR_RETVOID) {
6497 nfsm_chain_add_32(error, &nmrep,
6498 (err ? nfsrv_errmap(nd, err) : 0));
6499 }
6500 break;
6501 }
6502 }
6503
6504 done:
6505 nfsm_chain_build_done(error, &nmrep);
6506 if (error) {
6507 /* error composing reply header */
6508 /* XXX should we keep statistics for these errors? */
6509 mbuf_freem(mrep);
6510 return error;
6511 }
6512
6513 *nmrepp = nmrep;
6514 if ((err != 0) && (err != NFSERR_RETVOID)) {
6515 OSAddAtomic64(1, &nfsstats.srvrpc_errs);
6516 }
6517 return 0;
6518 }
6519
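/*
 * For reference, an accepted reply with an AUTH_NULL verifier as built
 * above is laid out as:
 *
 *	xid, RPC_REPLY, RPC_MSGACCEPTED,
 *	RPCAUTH_NULL, 0 (verifier length),
 *	accepted status, then the NFS status (unless NFSERR_RETVOID)
 */
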
6520 /*
6521 * The nfs server send routine.
6522 *
6523 * - return EINTR or ERESTART if interrupted by a signal
6524 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
6525 * - do any cleanup required by recoverable socket errors (???)
6526 */
6527 int
6528 nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top)
6529 {
6530 int error;
6531 socket_t so = slp->ns_so;
6532 struct sockaddr *sendnam;
6533 struct msghdr msg;
6534
6535 bzero(&msg, sizeof(msg));
6536 if (nam && !sock_isconnected(so) && (slp->ns_sotype != SOCK_STREAM)) {
6537 if ((sendnam = mbuf_data(nam))) {
6538 msg.msg_name = (caddr_t)sendnam;
6539 msg.msg_namelen = sendnam->sa_len;
6540 }
6541 }
6542 if (NFS_IS_DBG(NFS_FAC_SRV, 15)) {
6543 nfs_dump_mbuf(__func__, __LINE__, "nfsrv_send\n", top);
6544 }
6545 error = sock_sendmbuf(so, &msg, top, 0, NULL);
6546 if (!error) {
6547 return 0;
6548 }
6549 log(LOG_INFO, "nfsd send error %d\n", error);
6550
6551 if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) {
6552 error = EPIPE; /* zap TCP sockets if they time out on send */
6553 }
6554 /* Handle any recoverable (soft) socket errors here. (???) */
6555 if (error != EINTR && error != ERESTART && error != EIO &&
6556 error != EWOULDBLOCK && error != EPIPE) {
6557 error = 0;
6558 }
6559
6560 return error;
6561 }
6562
6563 /*
6564 * Socket upcall routine for the nfsd sockets.
6565 * The caddr_t arg is a pointer to the "struct nfsrv_sock".
6566  * Essentially do as much as possible without blocking; otherwise punt,
6567  * and it will be called again with MBUF_WAITOK from an nfsd.
6568 */
6569 void
6570 nfsrv_rcv(socket_t so, void *arg, int waitflag)
6571 {
6572 struct nfsrv_sock *slp = arg;
6573
6574 if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) {
6575 return;
6576 }
6577
6578 lck_rw_lock_exclusive(&slp->ns_rwlock);
6579 nfsrv_rcv_locked(so, slp, waitflag);
6580 /* Note: ns_rwlock gets dropped when called with MBUF_DONTWAIT */
6581 }
6582 void
6583 nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag)
6584 {
6585 mbuf_t m, mp, mhck, m2;
6586 int ns_flag = 0, error;
6587 struct msghdr msg;
6588 size_t bytes_read;
6589
6590 if ((slp->ns_flag & SLP_VALID) == 0) {
6591 if (waitflag == MBUF_DONTWAIT) {
6592 lck_rw_done(&slp->ns_rwlock);
6593 }
6594 return;
6595 }
6596
6597 #ifdef notdef
6598 /*
6599 * Define this to test for nfsds handling this under heavy load.
6600 */
6601 if (waitflag == MBUF_DONTWAIT) {
6602 ns_flag = SLP_NEEDQ;
6603 goto dorecs;
6604 }
6605 #endif
6606 if (slp->ns_sotype == SOCK_STREAM) {
6607 /*
6608 * If there are already records on the queue, defer soreceive()
6609 * to an(other) nfsd so that there is feedback to the TCP layer that
6610 * the nfs servers are heavily loaded.
6611 */
6612 if (slp->ns_rec) {
6613 ns_flag = SLP_NEEDQ;
6614 goto dorecs;
6615 }
6616
6617 /*
6618 * Do soreceive().
6619 */
6620 bytes_read = 1000000000;
6621 error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read);
6622 if (error || mp == NULL) {
6623 if (error == EWOULDBLOCK) {
6624 ns_flag = (waitflag == MBUF_DONTWAIT) ? SLP_NEEDQ : 0;
6625 } else {
6626 ns_flag = SLP_DISCONN;
6627 }
6628 goto dorecs;
6629 }
6630 m = mp;
6631 if (slp->ns_rawend) {
6632 if ((error = mbuf_setnext(slp->ns_rawend, m))) {
6633 panic("nfsrv_rcv: mbuf_setnext failed %d\n", error);
6634 }
6635 slp->ns_cc += bytes_read;
6636 } else {
6637 slp->ns_raw = m;
6638 slp->ns_cc = bytes_read;
6639 }
6640 while ((m2 = mbuf_next(m))) {
6641 m = m2;
6642 }
6643 slp->ns_rawend = m;
6644
6645 /*
6646 * Now try and parse record(s) out of the raw stream data.
6647 */
6648 error = nfsrv_getstream(slp, waitflag);
6649 if (error) {
6650 if (error == EPERM) {
6651 ns_flag = SLP_DISCONN;
6652 } else {
6653 ns_flag = SLP_NEEDQ;
6654 }
6655 }
6656 } else {
6657 struct sockaddr_storage nam;
6658
6659 if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) {
6660 /* already have max # RPC records queued on this socket */
6661 ns_flag = SLP_NEEDQ;
6662 goto dorecs;
6663 }
6664
6665 bzero(&msg, sizeof(msg));
6666 msg.msg_name = (caddr_t)&nam;
6667 msg.msg_namelen = sizeof(nam);
6668
6669 do {
6670 bytes_read = 1000000000;
6671 error = sock_receivembuf(so, &msg, &mp, MSG_DONTWAIT | MSG_NEEDSA, &bytes_read);
6672 if (mp) {
6673 if (msg.msg_name && (mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &mhck) == 0)) {
6674 mbuf_setlen(mhck, nam.ss_len);
6675 bcopy(&nam, mbuf_data(mhck), nam.ss_len);
6676 m = mhck;
6677 if (mbuf_setnext(m, mp)) {
6678 /* trouble... just drop it */
6679 printf("nfsrv_rcv: mbuf_setnext failed\n");
6680 mbuf_free(mhck);
6681 m = mp;
6682 }
6683 } else {
6684 m = mp;
6685 }
6686 if (slp->ns_recend) {
6687 mbuf_setnextpkt(slp->ns_recend, m);
6688 } else {
6689 slp->ns_rec = m;
6690 slp->ns_flag |= SLP_DOREC;
6691 }
6692 slp->ns_recend = m;
6693 mbuf_setnextpkt(m, NULL);
6694 slp->ns_reccnt++;
6695 }
6696 } while (mp);
6697 }
6698
6699 /*
6700 * Now try and process the request records, non-blocking.
6701 */
6702 dorecs:
6703 if (ns_flag) {
6704 slp->ns_flag |= ns_flag;
6705 }
6706 if (waitflag == MBUF_DONTWAIT) {
6707 int wake = (slp->ns_flag & SLP_WORKTODO);
6708 lck_rw_done(&slp->ns_rwlock);
6709 if (wake && nfsd_thread_count) {
6710 lck_mtx_lock(&nfsd_mutex);
6711 nfsrv_wakenfsd(slp);
6712 lck_mtx_unlock(&nfsd_mutex);
6713 }
6714 }
6715 }
6716
6717 /*
6718 * Try and extract an RPC request from the mbuf data list received on a
6719 * stream socket. The "waitflag" argument indicates whether or not it
6720 * can sleep.
6721 */
6722 int
6723 nfsrv_getstream(struct nfsrv_sock *slp, int waitflag)
6724 {
6725 mbuf_t m;
6726 char *cp1, *cp2, *mdata;
6727 int error;
6728 size_t len, mlen;
6729 mbuf_t om, m2, recm;
6730 u_int32_t recmark;
6731
6732 if (slp->ns_flag & SLP_GETSTREAM) {
6733 panic("nfs getstream");
6734 }
6735 slp->ns_flag |= SLP_GETSTREAM;
6736 for (;;) {
6737 if (slp->ns_reclen == 0) {
6738 if (slp->ns_cc < NFSX_UNSIGNED) {
6739 slp->ns_flag &= ~SLP_GETSTREAM;
6740 return 0;
6741 }
6742 m = slp->ns_raw;
6743 mdata = mbuf_data(m);
6744 mlen = mbuf_len(m);
6745 if (mlen >= NFSX_UNSIGNED) {
6746 bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED);
6747 mdata += NFSX_UNSIGNED;
6748 mlen -= NFSX_UNSIGNED;
6749 mbuf_setdata(m, mdata, mlen);
6750 } else {
6751 cp1 = (caddr_t)&recmark;
6752 cp2 = mdata;
6753 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
6754 while (mlen == 0) {
6755 m = mbuf_next(m);
6756 cp2 = mbuf_data(m);
6757 mlen = mbuf_len(m);
6758 }
6759 *cp1++ = *cp2++;
6760 mlen--;
6761 mbuf_setdata(m, cp2, mlen);
6762 }
6763 }
6764 slp->ns_cc -= NFSX_UNSIGNED;
6765 recmark = ntohl(recmark);
6766 slp->ns_reclen = recmark & ~0x80000000;
6767 if (recmark & 0x80000000) {
6768 slp->ns_flag |= SLP_LASTFRAG;
6769 } else {
6770 slp->ns_flag &= ~SLP_LASTFRAG;
6771 }
6772 if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) {
6773 slp->ns_flag &= ~SLP_GETSTREAM;
6774 return EPERM;
6775 }
6776 }
6777
6778 /*
6779 * Now get the record part.
6780 *
6781 * Note that slp->ns_reclen may be 0. Linux sometimes
6782 * generates 0-length RPCs
6783  * generates 0-length RPCs.
6784 recm = NULL;
6785 if (slp->ns_cc == slp->ns_reclen) {
6786 recm = slp->ns_raw;
6787 slp->ns_raw = slp->ns_rawend = NULL;
6788 slp->ns_cc = slp->ns_reclen = 0;
6789 } else if (slp->ns_cc > slp->ns_reclen) {
6790 len = 0;
6791 m = slp->ns_raw;
6792 mlen = mbuf_len(m);
6793 mdata = mbuf_data(m);
6794 om = NULL;
6795 while (len < slp->ns_reclen) {
6796 if ((len + mlen) > slp->ns_reclen) {
6797 if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) {
6798 slp->ns_flag &= ~SLP_GETSTREAM;
6799 return EWOULDBLOCK;
6800 }
6801 if (om) {
6802 if (mbuf_setnext(om, m2)) {
6803 /* trouble... just drop it */
6804 printf("nfsrv_getstream: mbuf_setnext failed\n");
6805 mbuf_freem(m2);
6806 slp->ns_flag &= ~SLP_GETSTREAM;
6807 return EWOULDBLOCK;
6808 }
6809 recm = slp->ns_raw;
6810 } else {
6811 recm = m2;
6812 }
6813 mdata += slp->ns_reclen - len;
6814 mlen -= slp->ns_reclen - len;
6815 mbuf_setdata(m, mdata, mlen);
6816 len = slp->ns_reclen;
6817 } else if ((len + mlen) == slp->ns_reclen) {
6818 om = m;
6819 len += mlen;
6820 m = mbuf_next(m);
6821 recm = slp->ns_raw;
6822 if (mbuf_setnext(om, NULL)) {
6823 printf("nfsrv_getstream: mbuf_setnext failed 2\n");
6824 slp->ns_flag &= ~SLP_GETSTREAM;
6825 return EWOULDBLOCK;
6826 }
6827 mlen = mbuf_len(m);
6828 mdata = mbuf_data(m);
6829 } else {
6830 om = m;
6831 len += mlen;
6832 m = mbuf_next(m);
6833 mlen = mbuf_len(m);
6834 mdata = mbuf_data(m);
6835 }
6836 }
6837 slp->ns_raw = m;
6838 slp->ns_cc -= len;
6839 slp->ns_reclen = 0;
6840 } else {
6841 slp->ns_flag &= ~SLP_GETSTREAM;
6842 return 0;
6843 }
6844
6845 /*
6846 * Accumulate the fragments into a record.
6847 */
6848 if (slp->ns_frag == NULL) {
6849 slp->ns_frag = recm;
6850 } else {
6851 m = slp->ns_frag;
6852 while ((m2 = mbuf_next(m))) {
6853 m = m2;
6854 }
6855 if ((error = mbuf_setnext(m, recm))) {
6856 panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error);
6857 }
6858 }
6859 if (slp->ns_flag & SLP_LASTFRAG) {
6860 if (slp->ns_recend) {
6861 mbuf_setnextpkt(slp->ns_recend, slp->ns_frag);
6862 } else {
6863 slp->ns_rec = slp->ns_frag;
6864 slp->ns_flag |= SLP_DOREC;
6865 }
6866 slp->ns_recend = slp->ns_frag;
6867 slp->ns_frag = NULL;
6868 }
6869 }
6870 }
6871
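/*
 * For reference, the record mark parsed above is the RFC 5531 framing
 * word: 4 bytes, big-endian; the high bit flags the last fragment and
 * the low 31 bits give the fragment length:
 *
 *	recmark = ntohl(recmark);
 *	lastfrag = (recmark & 0x80000000) != 0;
 *	fraglen = recmark & ~0x80000000;
 */
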
6872 /*
6873 * Parse an RPC header.
6874 */
6875 int
6876 nfsrv_dorec(
6877 struct nfsrv_sock *slp,
6878 struct nfsd *nfsd,
6879 struct nfsrv_descript **ndp)
6880 {
6881 mbuf_t m;
6882 mbuf_t nam;
6883 struct nfsrv_descript *nd;
6884 int error = 0;
6885
6886 *ndp = NULL;
6887 if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) {
6888 return ENOBUFS;
6889 }
6890 nd = zalloc(nfsrv_descript_zone);
6891 m = slp->ns_rec;
6892 slp->ns_rec = mbuf_nextpkt(m);
6893 if (slp->ns_rec) {
6894 mbuf_setnextpkt(m, NULL);
6895 } else {
6896 slp->ns_flag &= ~SLP_DOREC;
6897 slp->ns_recend = NULL;
6898 }
6899 slp->ns_reccnt--;
6900 if (mbuf_type(m) == MBUF_TYPE_SONAME) {
6901 nam = m;
6902 m = mbuf_next(m);
6903 if ((error = mbuf_setnext(nam, NULL))) {
6904 panic("nfsrv_dorec: mbuf_setnext failed %d\n", error);
6905 }
6906 } else {
6907 nam = NULL;
6908 }
6909 nd->nd_nam2 = nam;
6910 nfsm_chain_dissect_init(error, &nd->nd_nmreq, m);
6911 if (!error) {
6912 error = nfsrv_getreq(nd);
6913 }
6914 if (error) {
6915 if (nam) {
6916 mbuf_freem(nam);
6917 }
6918 if (nd->nd_gss_context) {
6919 nfs_gss_svc_ctx_deref(nd->nd_gss_context);
6920 }
6921 NFS_ZFREE(nfsrv_descript_zone, nd);
6922 return error;
6923 }
6924 nd->nd_mrep = NULL;
6925 *ndp = nd;
6926 nfsd->nfsd_nd = nd;
6927 return 0;
6928 }
6929
6930 /*
6931 * Parse an RPC request
6932 * - verify it
6933 * - fill in the cred struct.
6934 */
6935 int
6936 nfsrv_getreq(struct nfsrv_descript *nd)
6937 {
6938 struct nfsm_chain *nmreq;
6939 int len, i;
6940 u_int32_t nfsvers, auth_type;
6941 int error = 0;
6942 uid_t user_id;
6943 gid_t group_id;
6944 short ngroups;
6945 uint32_t val;
6946
6947 nd->nd_cr = NULL;
6948 nd->nd_gss_context = NULL;
6949 nd->nd_gss_seqnum = 0;
6950 nd->nd_gss_mb = NULL;
6951
6952 user_id = group_id = -2;
6953 val = auth_type = len = 0;
6954
6955 nmreq = &nd->nd_nmreq;
6956 nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID
6957 nfsm_chain_get_32(error, nmreq, val); // RPC Call
6958 if (!error && (val != RPC_CALL)) {
6959 error = EBADRPC;
6960 }
6961 nfsmout_if(error);
6962 nd->nd_repstat = 0;
6963 nfsm_chain_get_32(error, nmreq, val); // RPC Version
6964 nfsmout_if(error);
6965 if (val != RPC_VER2) {
6966 nd->nd_repstat = ERPCMISMATCH;
6967 nd->nd_procnum = NFSPROC_NOOP;
6968 return 0;
6969 }
6970 nfsm_chain_get_32(error, nmreq, val); // RPC Program Number
6971 nfsmout_if(error);
6972 if (val != NFS_PROG) {
6973 nd->nd_repstat = EPROGUNAVAIL;
6974 nd->nd_procnum = NFSPROC_NOOP;
6975 return 0;
6976 }
6977 nfsm_chain_get_32(error, nmreq, nfsvers); // NFS Version Number
6978 nfsmout_if(error);
6979 if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) {
6980 nd->nd_repstat = EPROGMISMATCH;
6981 nd->nd_procnum = NFSPROC_NOOP;
6982 return 0;
6983 }
6984 nd->nd_vers = nfsvers;
6985 nfsm_chain_get_32(error, nmreq, nd->nd_procnum); // NFS Procedure Number
6986 nfsmout_if(error);
6987 if ((nd->nd_procnum >= NFS_NPROCS) ||
6988 ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) {
6989 nd->nd_repstat = EPROCUNAVAIL;
6990 nd->nd_procnum = NFSPROC_NOOP;
6991 return 0;
6992 }
6993 if (nfsvers != NFS_VER3) {
6994 nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
6995 }
6996 nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor
6997 nfsm_chain_get_32(error, nmreq, len); // Auth Length
6998 if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) {
6999 error = EBADRPC;
7000 }
7001 nfsmout_if(error);
7002
7003 /* Handle authentication */
7004 if (auth_type == RPCAUTH_SYS) {
7005 struct posix_cred temp_pcred;
7006 if (nd->nd_procnum == NFSPROC_NULL) {
7007 return 0;
7008 }
7009 nd->nd_sec = RPCAUTH_SYS;
7010 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp
7011 nfsm_chain_get_32(error, nmreq, len); // hostname length
7012 if (len < 0 || len > NFS_MAXNAMLEN) {
7013 error = EBADRPC;
7014 }
7015 nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname
7016 nfsmout_if(error);
7017
7018 /* create a temporary credential using the bits from the wire */
7019 bzero(&temp_pcred, sizeof(temp_pcred));
7020 nfsm_chain_get_32(error, nmreq, user_id);
7021 nfsm_chain_get_32(error, nmreq, group_id);
7022 temp_pcred.cr_groups[0] = group_id;
7023 nfsm_chain_get_32(error, nmreq, len); // extra GID count
7024 if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) {
7025 error = EBADRPC;
7026 }
7027 nfsmout_if(error);
7028 for (i = 1; i <= len; i++) {
7029 if (i < NGROUPS) {
7030 nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]);
7031 } else {
7032 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED);
7033 }
7034 }
7035 nfsmout_if(error);
7036 ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1);
7037 if (ngroups > 1) {
7038 nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups);
7039 }
7040 nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE)
7041 nfsm_chain_get_32(error, nmreq, len); // verifier length
7042 if (len < 0 || len > RPCAUTH_MAXSIZ) {
7043 error = EBADRPC;
7044 }
7045 if (len > 0) {
7046 nfsm_chain_adv(error, nmreq, nfsm_rndup(len));
7047 }
7048
7049 /* request creation of a real credential */
7050 temp_pcred.cr_uid = user_id;
7051 temp_pcred.cr_ngroups = ngroups;
7052 nd->nd_cr = posix_cred_create(&temp_pcred);
7053 if (nd->nd_cr == NULL) {
7054 nd->nd_repstat = ENOMEM;
7055 nd->nd_procnum = NFSPROC_NOOP;
7056 return 0;
7057 }
7058 } else if (auth_type == RPCSEC_GSS) {
7059 error = nfs_gss_svc_cred_get(nd, nmreq);
7060 if (error) {
7061 if (error == EINVAL) {
7062 goto nfsmout; // drop the request
7063 }
7064 nd->nd_repstat = error;
7065 nd->nd_procnum = NFSPROC_NOOP;
7066 return 0;
7067 }
7068 } else {
7069 if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE
7070 return 0;
7071 }
7072 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
7073 nd->nd_procnum = NFSPROC_NOOP;
7074 return 0;
7075 }
7076 return 0;
7077 nfsmout:
7078 if (IS_VALID_CRED(nd->nd_cr)) {
7079 kauth_cred_unref(&nd->nd_cr);
7080 }
7081 nfsm_chain_cleanup(nmreq);
7082 return error;
7083 }
7084
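/*
 * For reference, the call header dissected above is, in order:
 *
 *	xid, RPC_CALL, RPC_VER2, NFS_PROG, version (2 or 3), procedure,
 *	credential (flavor, length, body), verifier (flavor, length, body)
 */
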
7085 /*
7086 * Search for a sleeping nfsd and wake it up.
7087 * SIDE EFFECT: If none found, make sure the socket is queued up so that one
7088 * of the running nfsds will go look for the work in the nfsrv_sockwait list.
7089 * Note: Must be called with nfsd_mutex held.
7090 */
7091 void
7092 nfsrv_wakenfsd(struct nfsrv_sock *slp)
7093 {
7094 struct nfsd *nd;
7095
7096 if ((slp->ns_flag & SLP_VALID) == 0) {
7097 return;
7098 }
7099
7100 lck_rw_lock_exclusive(&slp->ns_rwlock);
7101 /* if there's work to do on this socket, make sure it's queued up */
7102 if ((slp->ns_flag & SLP_WORKTODO) && !(slp->ns_flag & SLP_QUEUED)) {
7103 TAILQ_INSERT_TAIL(&nfsrv_sockwait, slp, ns_svcq);
7104 slp->ns_flag |= SLP_WAITQ;
7105 }
7106 lck_rw_done(&slp->ns_rwlock);
7107
7108 /* wake up a waiting nfsd, if possible */
7109 nd = TAILQ_FIRST(&nfsd_queue);
7110 if (!nd) {
7111 return;
7112 }
7113
7114 TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue);
7115 nd->nfsd_flag &= ~NFSD_WAITING;
7116 wakeup(nd);
7117 }
7118
7119 #endif /* CONFIG_NFS_SERVER */
7120
7121 #endif /* CONFIG_NFS */